Godeps, vendor: convert dependency management to trash (#3198)
This commit converts the dependency management from Godeps to the vendor folder, also switching the tool from godep to trash. Since the upstream tool lacks a few features proposed via a few PRs, use github.com/karalabe/trash until (and if) those PRs are merged. You can update dependencies via trash --update.

All dependencies have been updated to their latest versions.

Parts of the build system are reworked to drop the old notions of Godeps, and the go vet invocation is adjusted so that it does not run against the vendor folder, as that would just blow up during vetting.

The conversion drops OpenCL (and hence GPU mining support) from ethash and from our codebase. The short reasoning is that there is no one to maintain it, and having the OpenCL libs among our dependencies breaks builds: go install ./... tries to build them and fails with unsatisfied link errors against the C OpenCL libraries.

golang.org/x/net/context is not vendored in; we expect users to fetch it themselves (e.g. via go get). To keep ci.go builds reproducible, the package is "vendored" in build/_vendor.
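For illustration only, here is a minimal, self-contained sketch of the idea behind keeping go vet away from the vendor folder; it is not the actual ci.go implementation, just the shape of the filtering step:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Enumerate every package in the repository (vendored ones included).
	out, err := exec.Command("go", "list", "./...").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Keep only our own packages; vendored code would blow up during vetting.
	var targets []string
	for _, pkg := range strings.Fields(string(out)) {
		if !strings.Contains(pkg, "/vendor/") {
			targets = append(targets, pkg)
		}
	}
	// Vet the filtered package list.
	vet := exec.Command("go", append([]string{"vet"}, targets...)...)
	vet.Stdout, vet.Stderr = os.Stdout, os.Stderr
	if err := vet.Run(); err != nil {
		os.Exit(1)
	}
}

Run from the repository root, this vets everything except the vendored packages.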
This commit is contained in:
parent 7770304576
commit 289b30715d
.gitignore
@@ -13,8 +13,7 @@
 .ethtest
 */**/*tx_database*
 */**/*dapps*
-Godeps/_workspace/pkg
-Godeps/_workspace/bin
+build/_vendor/pkg
 
 #*
 .#*
Godeps/Godeps.json
@@ -1,332 +0,0 @@
{
	"ImportPath": "github.com/ethereum/go-ethereum",
	"GoVersion": "go1.7",
	"GodepVersion": "v74",
	"Packages": [
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/Gustav-Simonsson/go-opencl/cl",
			"Rev": "593e01cfc4f3353585015321e01951d4a907d3ef"
		},
		{
			"ImportPath": "github.com/cespare/cp",
			"Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c"
		},
		{
			"ImportPath": "github.com/davecgh/go-spew/spew",
			"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
		},
		{
			"ImportPath": "github.com/ethereum/ethash",
			"Comment": "v23.1-247-g2e80de5",
			"Rev": "2e80de5022370cfe632195b1720db52d07ff8a77"
		},
		{
			"ImportPath": "github.com/fatih/color",
			"Comment": "v0.1-12-g9aae6aa",
			"Rev": "9aae6aaa22315390f03959adca2c4d395b02fcef"
		},
		{
			"ImportPath": "github.com/gizak/termui",
			"Rev": "08a5d3f67b7d9ec87830ea39c48e570a1f18531f"
		},
		{
			"ImportPath": "github.com/golang/snappy",
			"Rev": "799c780093d646c1b79d30894e22512c319fa137"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru/simplelru",
			"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
		},
		{
			"ImportPath": "github.com/huin/goupnp",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/dcps/internetgateway1",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/dcps/internetgateway2",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/httpu",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/scpd",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/soap",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/huin/goupnp/ssdp",
			"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
		},
		{
			"ImportPath": "github.com/jackpal/gateway",
			"Rev": "192609c58b8985e645cbe82ddcb28a4362ca0fdc"
		},
		{
			"ImportPath": "github.com/jackpal/go-nat-pmp",
			"Rev": "46523a463303c6ede3ddfe45bde1c7ed52ebaacd"
		},
		{
			"ImportPath": "github.com/mattn/go-colorable",
			"Rev": "9fdad7c47650b7d2e1da50644c1f4ba7f172f252"
		},
		{
			"ImportPath": "github.com/mattn/go-isatty",
			"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
		},
		{
			"ImportPath": "github.com/mattn/go-runewidth",
			"Comment": "travisish-44-ge882a96",
			"Rev": "e882a96ec18dd43fa283187b66af74497c9101c0"
		},
		{
			"ImportPath": "github.com/nsf/termbox-go",
			"Rev": "362329b0aa6447eadd52edd8d660ec1dff470295"
		},
		{
			"ImportPath": "github.com/pborman/uuid",
			"Comment": "v1.0-6-g0f1a469",
			"Rev": "0f1a46960a86dcdf5dd30d3e6568a497a997909f"
		},
		{
			"ImportPath": "github.com/peterh/liner",
			"Rev": "ad1edfd30321d8f006ccf05f1e0524adeb943060"
		},
		{
			"ImportPath": "github.com/rcrowley/go-metrics",
			"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
		},
		{
			"ImportPath": "github.com/rcrowley/go-metrics/exp",
			"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
		},
		{
			"ImportPath": "github.com/rjeczalik/notify",
			"Rev": "f627deca7a510d96f0ef9388f2d0e8b16d21f87f"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/ast",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/dbg",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/file",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/parser",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/registry",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/robertkrimen/otto/token",
			"Rev": "53221230c215611a90762720c9042ac782ef74ee"
		},
		{
			"ImportPath": "github.com/rs/cors",
			"Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
		},
		{
			"ImportPath": "github.com/rs/xhandler",
			"Rev": "d9d9599b6aaf6a058cb7b1f48291ded2cbd13390"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
			"Rev": "6b4daa5362b502898ddf367c5c11deb9e7a5c727"
		},
		{
			"ImportPath": "golang.org/x/crypto/pbkdf2",
			"Rev": "351dc6a5bf92a5f2ae22fadeee08eb6a45aa2d93"
		},
		{
			"ImportPath": "golang.org/x/crypto/ripemd160",
			"Rev": "351dc6a5bf92a5f2ae22fadeee08eb6a45aa2d93"
		},
		{
			"ImportPath": "golang.org/x/crypto/scrypt",
			"Rev": "351dc6a5bf92a5f2ae22fadeee08eb6a45aa2d93"
		},
		{
			"ImportPath": "golang.org/x/net/context",
			"Rev": "6250b412798208e6c90b03b7c4f226de5aa299e2"
		},
		{
			"ImportPath": "golang.org/x/net/html",
			"Rev": "6250b412798208e6c90b03b7c4f226de5aa299e2"
		},
		{
			"ImportPath": "golang.org/x/net/html/atom",
			"Rev": "6250b412798208e6c90b03b7c4f226de5aa299e2"
		},
		{
			"ImportPath": "golang.org/x/net/html/charset",
			"Rev": "6250b412798208e6c90b03b7c4f226de5aa299e2"
		},
		{
			"ImportPath": "golang.org/x/net/websocket",
			"Rev": "6250b412798208e6c90b03b7c4f226de5aa299e2"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "a646d33e2ee3172a661fc09bca23bb4889a41bc8"
		},
		{
			"ImportPath": "golang.org/x/text/encoding",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/charmap",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/htmlindex",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/internal",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/internal/identifier",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/japanese",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/korean",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/simplifiedchinese",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/traditionalchinese",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/encoding/unicode",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/internal/tag",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/internal/utf8internal",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/language",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/runes",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/text/transform",
			"Rev": "d69c40b4be55797923cec7457fac7a244d91a9b6"
		},
		{
			"ImportPath": "golang.org/x/tools/go/ast/astutil",
			"Rev": "9deed8c6c1c89e0b6d68d727f215de8e851d1064"
		},
		{
			"ImportPath": "golang.org/x/tools/imports",
			"Rev": "9deed8c6c1c89e0b6d68d727f215de8e851d1064"
		},
		{
			"ImportPath": "gopkg.in/check.v1",
			"Rev": "4f90aeace3a26ad7021961c297b22c42160c7b25"
		},
		{
			"ImportPath": "gopkg.in/fatih/set.v0",
			"Comment": "v0.1.0-3-g27c4092",
			"Rev": "27c40922c40b43fe04554d8223a402af3ea333f3"
		},
		{
			"ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
			"Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57"
		},
		{
			"ImportPath": "gopkg.in/natefinch/npipe.v2",
			"Rev": "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6"
		},
		{
			"ImportPath": "gopkg.in/urfave/cli.v1",
			"Comment": "v1.17.0",
			"Rev": "01857ac33766ce0c93856370626f9799281c14f4"
		}
	]
}
Godeps/Readme
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
Godeps/_workspace/.gitignore
@@ -1,2 +0,0 @@
/pkg
/bin
@@ -1,26 +0,0 @@
/*
Package cl provides a binding to the OpenCL api. It's mostly a low-level
wrapper that avoids adding functionality while still making the interface
a little more friendly and easy to use.

Resource life-cycle management:

For any CL object that gets created (buffer, queue, kernel, etc..) you should
call object.Release() when finished with it to free the CL resources. This
explicitely calls the related clXXXRelease method for the type. However,
as a fallback there is a finalizer set for every resource item that takes
care of it (eventually) if Release isn't called. In this way you can have
better control over the life cycle of resources while having a fall back
to avoid leaks. This is similar to how file handles and such are handled
in the Go standard packages.
*/
package cl

// #include "headers/1.2/opencl.h"
// #cgo CFLAGS: -Iheaders/1.2
// #cgo darwin LDFLAGS: -framework OpenCL
// #cgo linux LDFLAGS: -lOpenCL
import "C"
import "errors"

var ErrUnsupported = errors.New("cl: unsupported")
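The life-cycle pattern that package comment describes (explicit Release with a finalizer as a fallback) reduces to the following self-contained sketch; the resource type below is purely illustrative and is not part of the cl package:

package main

import (
	"fmt"
	"runtime"
)

// resource stands in for any CL object (buffer, queue, kernel, ...).
type resource struct{ handle uintptr }

func newResource() *resource {
	r := &resource{handle: 1}
	// The finalizer is only a safety net in case Release is never called.
	runtime.SetFinalizer(r, func(r *resource) { r.Release() })
	return r
}

// Release frees the underlying handle; calling it twice is harmless.
func (r *resource) Release() {
	if r.handle != 0 {
		fmt.Println("releasing handle")
		r.handle = 0
	}
}

func main() {
	r := newResource()
	defer r.Release() // preferred: release explicitly when done
}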
@@ -1,161 +0,0 @@
package cl

// #include <stdlib.h>
// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import (
	"runtime"
	"unsafe"
)

const maxImageFormats = 256

type Context struct {
	clContext C.cl_context
	devices   []*Device
}

type MemObject struct {
	clMem C.cl_mem
	size  int
}

func releaseContext(c *Context) {
	if c.clContext != nil {
		C.clReleaseContext(c.clContext)
		c.clContext = nil
	}
}

func releaseMemObject(b *MemObject) {
	if b.clMem != nil {
		C.clReleaseMemObject(b.clMem)
		b.clMem = nil
	}
}

func newMemObject(mo C.cl_mem, size int) *MemObject {
	memObject := &MemObject{clMem: mo, size: size}
	runtime.SetFinalizer(memObject, releaseMemObject)
	return memObject
}

func (b *MemObject) Release() {
	releaseMemObject(b)
}

// TODO: properties
func CreateContext(devices []*Device) (*Context, error) {
	deviceIds := buildDeviceIdList(devices)
	var err C.cl_int
	clContext := C.clCreateContext(nil, C.cl_uint(len(devices)), &deviceIds[0], nil, nil, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clContext == nil {
		return nil, ErrUnknown
	}
	context := &Context{clContext: clContext, devices: devices}
	runtime.SetFinalizer(context, releaseContext)
	return context, nil
}

func (ctx *Context) GetSupportedImageFormats(flags MemFlag, imageType MemObjectType) ([]ImageFormat, error) {
	var formats [maxImageFormats]C.cl_image_format
	var nFormats C.cl_uint
	if err := C.clGetSupportedImageFormats(ctx.clContext, C.cl_mem_flags(flags), C.cl_mem_object_type(imageType), maxImageFormats, &formats[0], &nFormats); err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	fmts := make([]ImageFormat, nFormats)
	for i, f := range formats[:nFormats] {
		fmts[i] = ImageFormat{
			ChannelOrder:    ChannelOrder(f.image_channel_order),
			ChannelDataType: ChannelDataType(f.image_channel_data_type),
		}
	}
	return fmts, nil
}

func (ctx *Context) CreateCommandQueue(device *Device, properties CommandQueueProperty) (*CommandQueue, error) {
	var err C.cl_int
	clQueue := C.clCreateCommandQueue(ctx.clContext, device.id, C.cl_command_queue_properties(properties), &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clQueue == nil {
		return nil, ErrUnknown
	}
	commandQueue := &CommandQueue{clQueue: clQueue, device: device}
	runtime.SetFinalizer(commandQueue, releaseCommandQueue)
	return commandQueue, nil
}

func (ctx *Context) CreateProgramWithSource(sources []string) (*Program, error) {
	cSources := make([]*C.char, len(sources))
	for i, s := range sources {
		cs := C.CString(s)
		cSources[i] = cs
		defer C.free(unsafe.Pointer(cs))
	}
	var err C.cl_int
	clProgram := C.clCreateProgramWithSource(ctx.clContext, C.cl_uint(len(sources)), &cSources[0], nil, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clProgram == nil {
		return nil, ErrUnknown
	}
	program := &Program{clProgram: clProgram, devices: ctx.devices}
	runtime.SetFinalizer(program, releaseProgram)
	return program, nil
}

func (ctx *Context) CreateBufferUnsafe(flags MemFlag, size int, dataPtr unsafe.Pointer) (*MemObject, error) {
	var err C.cl_int
	clBuffer := C.clCreateBuffer(ctx.clContext, C.cl_mem_flags(flags), C.size_t(size), dataPtr, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clBuffer == nil {
		return nil, ErrUnknown
	}
	return newMemObject(clBuffer, size), nil
}

func (ctx *Context) CreateEmptyBuffer(flags MemFlag, size int) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, size, nil)
}

func (ctx *Context) CreateEmptyBufferFloat32(flags MemFlag, size int) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, 4*size, nil)
}

func (ctx *Context) CreateBuffer(flags MemFlag, data []byte) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, len(data), unsafe.Pointer(&data[0]))
}

//float64
func (ctx *Context) CreateBufferFloat32(flags MemFlag, data []float32) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, 4*len(data), unsafe.Pointer(&data[0]))
}

func (ctx *Context) CreateUserEvent() (*Event, error) {
	var err C.cl_int
	clEvent := C.clCreateUserEvent(ctx.clContext, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	return newEvent(clEvent), nil
}

func (ctx *Context) Release() {
	releaseContext(ctx)
}

// http://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/clCreateSubBuffer.html
// func (memObject *MemObject) CreateSubBuffer(flags MemFlag, bufferCreateType BufferCreateType, )
@ -1,510 +0,0 @@
|
|||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #include "cl_ext.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxDeviceCount = 64
|
||||
|
||||
type DeviceType uint
|
||||
|
||||
const (
|
||||
DeviceTypeCPU DeviceType = C.CL_DEVICE_TYPE_CPU
|
||||
DeviceTypeGPU DeviceType = C.CL_DEVICE_TYPE_GPU
|
||||
DeviceTypeAccelerator DeviceType = C.CL_DEVICE_TYPE_ACCELERATOR
|
||||
DeviceTypeDefault DeviceType = C.CL_DEVICE_TYPE_DEFAULT
|
||||
DeviceTypeAll DeviceType = C.CL_DEVICE_TYPE_ALL
|
||||
)
|
||||
|
||||
type FPConfig int
|
||||
|
||||
const (
|
||||
FPConfigDenorm FPConfig = C.CL_FP_DENORM // denorms are supported
|
||||
FPConfigInfNaN FPConfig = C.CL_FP_INF_NAN // INF and NaNs are supported
|
||||
FPConfigRoundToNearest FPConfig = C.CL_FP_ROUND_TO_NEAREST // round to nearest even rounding mode supported
|
||||
FPConfigRoundToZero FPConfig = C.CL_FP_ROUND_TO_ZERO // round to zero rounding mode supported
|
||||
FPConfigRoundToInf FPConfig = C.CL_FP_ROUND_TO_INF // round to positive and negative infinity rounding modes supported
|
||||
FPConfigFMA FPConfig = C.CL_FP_FMA // IEEE754-2008 fused multiply-add is supported
|
||||
FPConfigSoftFloat FPConfig = C.CL_FP_SOFT_FLOAT // Basic floating-point operations (such as addition, subtraction, multiplication) are implemented in software
|
||||
)
|
||||
|
||||
var fpConfigNameMap = map[FPConfig]string{
|
||||
FPConfigDenorm: "Denorm",
|
||||
FPConfigInfNaN: "InfNaN",
|
||||
FPConfigRoundToNearest: "RoundToNearest",
|
||||
FPConfigRoundToZero: "RoundToZero",
|
||||
FPConfigRoundToInf: "RoundToInf",
|
||||
FPConfigFMA: "FMA",
|
||||
FPConfigSoftFloat: "SoftFloat",
|
||||
}
|
||||
|
||||
func (c FPConfig) String() string {
|
||||
var parts []string
|
||||
for bit, name := range fpConfigNameMap {
|
||||
if c&bit != 0 {
|
||||
parts = append(parts, name)
|
||||
}
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
func (dt DeviceType) String() string {
|
||||
var parts []string
|
||||
if dt&DeviceTypeCPU != 0 {
|
||||
parts = append(parts, "CPU")
|
||||
}
|
||||
if dt&DeviceTypeGPU != 0 {
|
||||
parts = append(parts, "GPU")
|
||||
}
|
||||
if dt&DeviceTypeAccelerator != 0 {
|
||||
parts = append(parts, "Accelerator")
|
||||
}
|
||||
if dt&DeviceTypeDefault != 0 {
|
||||
parts = append(parts, "Default")
|
||||
}
|
||||
if parts == nil {
|
||||
parts = append(parts, "None")
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
id C.cl_device_id
|
||||
}
|
||||
|
||||
func buildDeviceIdList(devices []*Device) []C.cl_device_id {
|
||||
deviceIds := make([]C.cl_device_id, len(devices))
|
||||
for i, d := range devices {
|
||||
deviceIds[i] = d.id
|
||||
}
|
||||
return deviceIds
|
||||
}
|
||||
|
||||
// Obtain the list of devices available on a platform. 'platform' refers
|
||||
// to the platform returned by GetPlatforms or can be nil. If platform
|
||||
// is nil, the behavior is implementation-defined.
|
||||
func GetDevices(platform *Platform, deviceType DeviceType) ([]*Device, error) {
|
||||
var deviceIds [maxDeviceCount]C.cl_device_id
|
||||
var numDevices C.cl_uint
|
||||
var platformId C.cl_platform_id
|
||||
if platform != nil {
|
||||
platformId = platform.id
|
||||
}
|
||||
if err := C.clGetDeviceIDs(platformId, C.cl_device_type(deviceType), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if numDevices > maxDeviceCount {
|
||||
numDevices = maxDeviceCount
|
||||
}
|
||||
devices := make([]*Device, numDevices)
|
||||
for i := 0; i < int(numDevices); i++ {
|
||||
devices[i] = &Device{id: deviceIds[i]}
|
||||
}
|
||||
return devices, nil
|
||||
}
|
||||
|
||||
func (d *Device) nullableId() C.cl_device_id {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
return d.id
|
||||
}
|
||||
|
||||
func (d *Device) GetInfoString(param C.cl_device_info, panicOnError bool) (string, error) {
|
||||
var strC [1024]C.char
|
||||
var strN C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, 1024, unsafe.Pointer(&strC), &strN); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return "", toError(err)
|
||||
}
|
||||
|
||||
// OpenCL strings are NUL-terminated, and the terminator is included in strN
|
||||
// Go strings aren't NUL-terminated, so subtract 1 from the length
|
||||
return C.GoStringN((*C.char)(unsafe.Pointer(&strC)), C.int(strN-1)), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUint(param C.cl_device_info, panicOnError bool) (uint, error) {
|
||||
var val C.cl_uint
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return uint(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoSize(param C.cl_device_info, panicOnError bool) (int, error) {
|
||||
var val C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUlong(param C.cl_device_info, panicOnError bool) (int64, error) {
|
||||
var val C.cl_ulong
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoBool(param C.cl_device_info, panicOnError bool) (bool, error) {
|
||||
var val C.cl_bool
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return false, toError(err)
|
||||
}
|
||||
return val == C.CL_TRUE, nil
|
||||
}
|
||||
|
||||
func (d *Device) Name() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_NAME, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Vendor() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_VENDOR, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Extensions() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_EXTENSIONS, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) OpenCLCVersion() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_OPENCL_C_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Profile() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_PROFILE, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Version() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) DriverVersion() string {
|
||||
str, _ := d.GetInfoString(C.CL_DRIVER_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
// The default compute device address space size specified as an
|
||||
// unsigned integer value in bits. Currently supported values are 32 or 64 bits.
|
||||
func (d *Device) AddressBits() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_ADDRESS_BITS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Size of global memory cache line in bytes.
|
||||
func (d *Device) GlobalMemCachelineSize() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum configured clock frequency of the device in MHz.
|
||||
func (d *Device) MaxClockFrequency() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CLOCK_FREQUENCY, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// The number of parallel compute units on the OpenCL device.
|
||||
// A work-group executes on a single compute unit. The minimum value is 1.
|
||||
func (d *Device) MaxComputeUnits() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_COMPUTE_UNITS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of arguments declared with the __constant qualifier in a kernel.
|
||||
// The minimum value is 8 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxConstantArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CONSTANT_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of simultaneous image objects that can be read by a kernel.
|
||||
// The minimum value is 128 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) MaxReadImageArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_READ_IMAGE_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum number of samplers that can be used in a kernel. The minimum
|
||||
// value is 16 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE. (Also see sampler_t.)
|
||||
func (d *Device) MaxSamplers() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_SAMPLERS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum dimensions that specify the global and local work-item IDs used
|
||||
// by the data parallel execution model. (Refer to clEnqueueNDRangeKernel).
|
||||
// The minimum value is 3 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxWorkItemDimensions() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of simultaneous image objects that can be written to by a
|
||||
// kernel. The minimum value is 8 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) MaxWriteImageArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WRITE_IMAGE_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// The minimum value is the size (in bits) of the largest OpenCL built-in
|
||||
// data type supported by the device (long16 in FULL profile, long16 or
|
||||
// int16 in EMBEDDED profile) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MemBaseAddrAlign() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MEM_BASE_ADDR_ALIGN, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthChar() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthShort() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthInt() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthLong() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthFloat() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthDouble() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthHalf() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max height of 2D image in pixels. The minimum value is 8192
|
||||
// if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image2DMaxHeight() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_HEIGHT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max width of 2D image or 1D image not created from a buffer object in
|
||||
// pixels. The minimum value is 8192 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image2DMaxWidth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_WIDTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max depth of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxDepth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_DEPTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max height of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxHeight() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_HEIGHT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max width of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxWidth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_WIDTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max size in bytes of the arguments that can be passed to a kernel. The
|
||||
// minimum value is 1024 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
// For this minimum value, only a maximum of 128 arguments can be passed to a kernel.
|
||||
func (d *Device) MaxParameterSize() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_MAX_PARAMETER_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum number of work-items in a work-group executing a kernel on a
|
||||
// single compute unit, using the data parallel execution model. (Refer
|
||||
// to clEnqueueNDRangeKernel). The minimum value is 1.
|
||||
func (d *Device) MaxWorkGroupSize() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_MAX_WORK_GROUP_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Describes the resolution of device timer. This is measured in nanoseconds.
|
||||
func (d *Device) ProfilingTimerResolution() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_PROFILING_TIMER_RESOLUTION, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Size of local memory arena in bytes. The minimum value is 32 KB for
|
||||
// devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) LocalMemSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_LOCAL_MEM_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Max size in bytes of a constant buffer allocation. The minimum value is
|
||||
// 64 KB for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxConstantBufferSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Max size of memory object allocation in bytes. The minimum value is max
|
||||
// (1/4th of CL_DEVICE_GLOBAL_MEM_SIZE, 128*1024*1024) for devices that are
|
||||
// not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxMemAllocSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_MEM_ALLOC_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Size of global device memory in bytes.
|
||||
func (d *Device) GlobalMemSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_GLOBAL_MEM_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) Available() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_AVAILABLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) CompilerAvailable() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_COMPILER_AVAILABLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) EndianLittle() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_ENDIAN_LITTLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Is CL_TRUE if the device implements error correction for all
|
||||
// accesses to compute device memory (global and constant). Is
|
||||
// CL_FALSE if the device does not implement such error correction.
|
||||
func (d *Device) ErrorCorrectionSupport() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_ERROR_CORRECTION_SUPPORT, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) HostUnifiedMemory() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_HOST_UNIFIED_MEMORY, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) ImageSupport() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_IMAGE_SUPPORT, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) Type() DeviceType {
|
||||
var deviceType C.cl_device_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_TYPE, C.size_t(unsafe.Sizeof(deviceType)), unsafe.Pointer(&deviceType), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get device type")
|
||||
}
|
||||
return DeviceType(deviceType)
|
||||
}
|
||||
|
||||
// Describes double precision floating-point capability of the OpenCL device
|
||||
func (d *Device) DoubleFPConfig() FPConfig {
|
||||
var fpConfig C.cl_device_fp_config
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_DOUBLE_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get double FP config")
|
||||
}
|
||||
return FPConfig(fpConfig)
|
||||
}
|
||||
|
||||
// Describes the OPTIONAL half precision floating-point capability of the OpenCL device
|
||||
func (d *Device) HalfFPConfig() FPConfig {
|
||||
var fpConfig C.cl_device_fp_config
|
||||
err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_HALF_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil)
|
||||
if err != C.CL_SUCCESS {
|
||||
return FPConfig(0)
|
||||
}
|
||||
return FPConfig(fpConfig)
|
||||
}
|
||||
|
||||
// Type of local memory supported. This can be set to CL_LOCAL implying dedicated
|
||||
// local memory storage such as SRAM, or CL_GLOBAL. For custom devices, CL_NONE
|
||||
// can also be returned indicating no local memory support.
|
||||
func (d *Device) LocalMemType() LocalMemType {
|
||||
var memType C.cl_device_local_mem_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_LOCAL_MEM_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
|
||||
return LocalMemType(C.CL_NONE)
|
||||
}
|
||||
return LocalMemType(memType)
|
||||
}
|
||||
|
||||
// Describes the execution capabilities of the device. The mandated minimum capability is CL_EXEC_KERNEL.
|
||||
func (d *Device) ExecutionCapabilities() ExecCapability {
|
||||
var execCap C.cl_device_exec_capabilities
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_EXECUTION_CAPABILITIES, C.size_t(unsafe.Sizeof(execCap)), unsafe.Pointer(&execCap), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get execution capabilities")
|
||||
}
|
||||
return ExecCapability(execCap)
|
||||
}
|
||||
|
||||
func (d *Device) GlobalMemCacheType() MemCacheType {
|
||||
var memType C.cl_device_mem_cache_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
|
||||
return MemCacheType(C.CL_NONE)
|
||||
}
|
||||
return MemCacheType(memType)
|
||||
}
|
||||
|
||||
// Maximum number of work-items that can be specified in each dimension of the work-group to clEnqueueNDRangeKernel.
|
||||
//
|
||||
// Returns n size_t entries, where n is the value returned by the query for CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS.
|
||||
//
|
||||
// The minimum value is (1, 1, 1) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxWorkItemSizes() []int {
|
||||
dims := d.MaxWorkItemDimensions()
|
||||
sizes := make([]C.size_t, dims)
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_MAX_WORK_ITEM_SIZES, C.size_t(int(unsafe.Sizeof(sizes[0]))*dims), unsafe.Pointer(&sizes[0]), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get max work item sizes")
|
||||
}
|
||||
intSizes := make([]int, dims)
|
||||
for i, s := range sizes {
|
||||
intSizes[i] = int(s)
|
||||
}
|
||||
return intSizes
|
||||
}
|
|
@@ -1,51 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

const FPConfigCorrectlyRoundedDivideSqrt FPConfig = C.CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT

func init() {
	fpConfigNameMap[FPConfigCorrectlyRoundedDivideSqrt] = "CorrectlyRoundedDivideSqrt"
}

func (d *Device) BuiltInKernels() string {
	str, _ := d.getInfoString(C.CL_DEVICE_BUILT_IN_KERNELS, true)
	return str
}

// Is CL_FALSE if the implementation does not have a linker available. Is CL_TRUE if the linker is available. This can be CL_FALSE for the embedded platform profile only. This must be CL_TRUE if CL_DEVICE_COMPILER_AVAILABLE is CL_TRUE
func (d *Device) LinkerAvailable() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_LINKER_AVAILABLE, true)
	return val
}

func (d *Device) ParentDevice() *Device {
	var deviceId C.cl_device_id
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_PARENT_DEVICE, C.size_t(unsafe.Sizeof(deviceId)), unsafe.Pointer(&deviceId), nil); err != C.CL_SUCCESS {
		panic("ParentDevice failed")
	}
	if deviceId == nil {
		return nil
	}
	return &Device{id: deviceId}
}

// Max number of pixels for a 1D image created from a buffer object. The minimum value is 65536 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) ImageMaxBufferSize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_BUFFER_SIZE, true)
	return int(val)
}

// Max number of images in a 1D or 2D image array. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE
func (d *Device) ImageMaxArraySize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_ARRAY_SIZE, true)
	return int(val)
}
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl.h (generated, vendored, 1210 lines): file diff suppressed because it is too large.
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_ext.h (generated, vendored, 315 lines):
|
@ -1,315 +0,0 @@
|
|||
/*******************************************************************************
|
||||
* Copyright (c) 2008-2013 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
******************************************************************************/
|
||||
|
||||
/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
|
||||
|
||||
/* cl_ext.h contains OpenCL extensions which don't have external */
|
||||
/* (OpenGL, D3D) dependencies. */
|
||||
|
||||
#ifndef __CL_EXT_H
|
||||
#define __CL_EXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <AvailabilityMacros.h>
|
||||
#endif
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
/* cl_khr_fp16 extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_HALF_FP_CONFIG 0x1033
|
||||
|
||||
/* Memory object destruction
|
||||
*
|
||||
* Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
|
||||
*
|
||||
* Registers a user callback function that will be called when the memory object is deleted and its resources
|
||||
* freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
|
||||
* stack associated with memobj. The registered user callback functions are called in the reverse order in
|
||||
* which they were registered. The user callback functions are called and then the memory object is deleted
|
||||
* and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
|
||||
* notified when the memory referenced by host_ptr, specified when the memory object is created and used as
|
||||
* the storage bits for the memory object, can be reused or freed.
|
||||
*
|
||||
* The application may not call CL api's with the cl_mem object passed to the pfn_notify.
|
||||
*
|
||||
* Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*/
|
||||
#define cl_APPLE_SetMemObjectDestructor 1
|
||||
cl_int CL_API_ENTRY clSetMemObjectDestructorAPPLE( cl_mem /* memobj */,
|
||||
void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
|
||||
void * /*user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Context Logging Functions
|
||||
*
|
||||
* The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
|
||||
* Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*
|
||||
* clLogMessagesToSystemLog fowards on all log messages to the Apple System Logger
|
||||
*/
|
||||
#define cl_APPLE_ContextLoggingFunctions 1
|
||||
extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
|
||||
extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
|
||||
extern void CL_API_ENTRY clLogMessagesToStderrAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/************************
|
||||
* cl_khr_icd extension *
|
||||
************************/
|
||||
#define cl_khr_icd 1
|
||||
|
||||
/* cl_platform_info */
|
||||
#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_PLATFORM_NOT_FOUND_KHR -1001
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clIcdGetPlatformIDsKHR(cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
|
||||
cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
|
||||
/* Extension: cl_khr_image2D_buffer
|
||||
*
|
||||
* This extension allows a 2D image to be created from a cl_mem buffer without a copy.
|
||||
* The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
|
||||
* Both the sampler and sampler-less read_image built-in functions are supported for 2D images
|
||||
* and 2D images created from a buffer. Similarly, the write_image built-ins are also supported
|
||||
* for 2D images created from a buffer.
|
||||
*
|
||||
* When the 2D image from buffer is created, the client must specify the width,
|
||||
* height, image format (i.e. channel order and channel data type) and optionally the row pitch
|
||||
*
|
||||
* The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
|
||||
* The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
|
||||
*/
|
||||
|
||||
/*************************************
|
||||
* cl_khr_initalize_memory extension *
|
||||
*************************************/
|
||||
|
||||
#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x200E
|
||||
|
||||
|
||||
/**************************************
|
||||
* cl_khr_terminate_context extension *
|
||||
**************************************/
|
||||
|
||||
#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x200F
|
||||
#define CL_CONTEXT_TERMINATE_KHR 0x2010
|
||||
|
||||
#define cl_khr_terminate_context 1
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
|
||||
/*
|
||||
* Extension: cl_khr_spir
|
||||
*
|
||||
* This extension adds support to create an OpenCL program object from a
|
||||
* Standard Portable Intermediate Representation (SPIR) instance
|
||||
*/
|
||||
|
||||
#define CL_DEVICE_SPIR_VERSIONS 0x40E0
|
||||
#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1
|
||||
|
||||
|
||||
/******************************************
|
||||
* cl_nv_device_attribute_query extension *
|
||||
******************************************/
|
||||
/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
|
||||
#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002
|
||||
#define CL_DEVICE_WARP_SIZE_NV 0x4003
|
||||
#define CL_DEVICE_GPU_OVERLAP_NV 0x4004
|
||||
#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
|
||||
#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006
|
||||
|
||||
/*********************************
|
||||
* cl_amd_device_attribute_query *
|
||||
*********************************/
|
||||
#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036
|
||||
|
||||
/*********************************
|
||||
* cl_arm_printf extension
|
||||
*********************************/
|
||||
#define CL_PRINTF_CALLBACK_ARM 0x40B0
|
||||
#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1
|
||||
|
||||
#ifdef CL_VERSION_1_1
|
||||
/***********************************
|
||||
* cl_ext_device_fission extension *
|
||||
***********************************/
|
||||
#define cl_ext_device_fission 1
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef cl_ulong cl_device_partition_property_ext;
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clCreateSubDevicesEXT( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
/* cl_device_partition_property_ext */
|
||||
#define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050
|
||||
#define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051
|
||||
#define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052
|
||||
#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053
|
||||
|
||||
/* clDeviceGetInfo selectors */
|
||||
#define CL_DEVICE_PARENT_DEVICE_EXT 0x4054
|
||||
#define CL_DEVICE_PARTITION_TYPES_EXT 0x4055
|
||||
#define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056
|
||||
#define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057
|
||||
#define CL_DEVICE_PARTITION_STYLE_EXT 0x4058
|
||||
|
||||
/* error codes */
|
||||
#define CL_DEVICE_PARTITION_FAILED_EXT -1057
|
||||
#define CL_INVALID_PARTITION_COUNT_EXT -1058
|
||||
#define CL_INVALID_PARTITION_NAME_EXT -1059
|
||||
|
||||
/* CL_AFFINITY_DOMAINs */
|
||||
#define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1
|
||||
#define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2
|
||||
#define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3
|
||||
#define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4
|
||||
#define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10
|
||||
#define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100
|
||||
|
||||
/* cl_device_partition_property_ext list terminators */
|
||||
#define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1)
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ext_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29)
|
||||
|
||||
#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0
|
||||
#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1
|
||||
#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2
|
||||
#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3
|
||||
#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4
|
||||
#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5
|
||||
#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6
|
||||
#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7
|
||||
|
||||
typedef cl_uint cl_image_pitch_info_qcom;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetDeviceImageInfoQCOM(cl_device_id device,
|
||||
size_t image_width,
|
||||
size_t image_height,
|
||||
const cl_image_format *image_format,
|
||||
cl_image_pitch_info_qcom param_name,
|
||||
size_t param_value_size,
|
||||
void *param_value,
|
||||
size_t *param_value_size_ret);
|
||||
|
||||
typedef struct _cl_mem_ext_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Legal values will be defined in layered extensions. */
|
||||
cl_uint allocation_type;
|
||||
|
||||
/* Host cache policy for this external memory allocation. */
|
||||
cl_uint host_cache_policy;
|
||||
|
||||
} cl_mem_ext_host_ptr;
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ion_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8
|
||||
|
||||
typedef struct _cl_mem_ion_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Must be CL_MEM_ION_HOST_PTR_QCOM for ION allocations. */
|
||||
cl_mem_ext_host_ptr ext_host_ptr;
|
||||
|
||||
/* ION file descriptor */
|
||||
int ion_filedesc;
|
||||
|
||||
/* Host pointer to the ION allocated memory */
|
||||
void* ion_hostptr;
|
||||
|
||||
} cl_mem_ion_host_ptr;
|
||||
|
||||
#endif /* CL_VERSION_1_1 */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* __CL_EXT_H */
|
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl.h (generated, vendored, 158 lines):
@ -1,158 +0,0 @@
|
|||
/**********************************************************************************
|
||||
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
**********************************************************************************/
|
||||
|
||||
#ifndef __OPENCL_CL_GL_H
|
||||
#define __OPENCL_CL_GL_H
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef cl_uint cl_gl_object_type;
|
||||
typedef cl_uint cl_gl_texture_info;
|
||||
typedef cl_uint cl_gl_platform_info;
|
||||
typedef struct __GLsync *cl_GLsync;
|
||||
|
||||
/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
|
||||
#define CL_GL_OBJECT_BUFFER 0x2000
|
||||
#define CL_GL_OBJECT_TEXTURE2D 0x2001
|
||||
#define CL_GL_OBJECT_TEXTURE3D 0x2002
|
||||
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
|
||||
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
|
||||
#define CL_GL_OBJECT_TEXTURE1D 0x200F
|
||||
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
|
||||
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
|
||||
|
||||
/* cl_gl_texture_info */
|
||||
#define CL_GL_TEXTURE_TARGET 0x2004
|
||||
#define CL_GL_MIPMAP_LEVEL 0x2005
|
||||
#define CL_GL_NUM_SAMPLES 0x2012
|
||||
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLBuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* bufobj */,
|
||||
int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLRenderbuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* renderbuffer */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLObjectInfo(cl_mem /* memobj */,
|
||||
cl_gl_object_type * /* gl_object_type */,
|
||||
cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLTextureInfo(cl_mem /* memobj */,
|
||||
cl_gl_texture_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Deprecated OpenCL 1.1 APIs */
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture2D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture3D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
/* cl_khr_gl_sharing extension */
|
||||
|
||||
#define cl_khr_gl_sharing 1
|
||||
|
||||
typedef cl_uint cl_gl_context_info;
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
|
||||
|
||||
/* cl_gl_context_info */
|
||||
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
|
||||
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
|
||||
|
||||
/* Additional cl_context_properties */
|
||||
#define CL_GL_CONTEXT_KHR 0x2008
|
||||
#define CL_EGL_DISPLAY_KHR 0x2009
|
||||
#define CL_GLX_DISPLAY_KHR 0x200A
|
||||
#define CL_WGL_HDC_KHR 0x200B
|
||||
#define CL_CGL_SHAREGROUP_KHR 0x200C
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLContextInfoKHR(const cl_context_properties * /* properties */,
|
||||
cl_gl_context_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(
|
||||
const cl_context_properties * properties,
|
||||
cl_gl_context_info param_name,
|
||||
size_t param_value_size,
|
||||
void * param_value,
|
||||
size_t * param_value_size_ret);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_CL_GL_H */
|
65 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl_ext.h generated vendored
@@ -1,65 +0,0 @@
/**********************************************************************************
 * Copyright (c) 2008-2012 The Khronos Group Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and/or associated documentation files (the
 * "Materials"), to deal in the Materials without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Materials, and to
 * permit persons to whom the Materials are furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
 **********************************************************************************/

/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */

/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */
/* OpenGL dependencies. */

#ifndef __OPENCL_CL_GL_EXT_H
#define __OPENCL_CL_GL_EXT_H

#ifdef __cplusplus
extern "C" {
#endif

#include <cl_gl.h>

/*
 * For each extension, follow this template
 * cl_VEN_extname extension */
/* #define cl_VEN_extname 1
 * ... define new types, if any
 * ... define new tokens, if any
 * ... define new APIs, if any
 *
 * If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header
 * This allows us to avoid having to decide whether to include GL headers or GLES here.
 */

/*
 * cl_khr_gl_event extension
 * See section 9.9 in the OpenCL 1.1 spec for more information
 */
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D

extern CL_API_ENTRY cl_event CL_API_CALL
clCreateEventFromGLsyncKHR(cl_context /* context */,
                           cl_GLsync /* cl_GLsync */,
                           cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;

#ifdef __cplusplus
}
#endif

#endif /* __OPENCL_CL_GL_EXT_H */
1278 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_platform.h generated vendored
File diff suppressed because it is too large
43 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/opencl.h generated vendored
@@ -1,43 +0,0 @@
/*******************************************************************************
 * Copyright (c) 2008-2012 The Khronos Group Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and/or associated documentation files (the
 * "Materials"), to deal in the Materials without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Materials, and to
 * permit persons to whom the Materials are furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
 ******************************************************************************/

/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */

#ifndef __OPENCL_H
#define __OPENCL_H

#ifdef __cplusplus
extern "C" {
#endif

#include <cl.h>
#include <cl_gl.h>
#include <cl_gl_ext.h>
#include <cl_ext.h>

#ifdef __cplusplus
}
#endif

#endif /* __OPENCL_H */
@ -1,83 +0,0 @@
|
|||
// +build cl12
|
||||
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
import (
|
||||
"image"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func (ctx *Context) CreateImage(flags MemFlag, imageFormat ImageFormat, imageDesc ImageDescription, data []byte) (*MemObject, error) {
|
||||
format := imageFormat.toCl()
|
||||
desc := imageDesc.toCl()
|
||||
var dataPtr unsafe.Pointer
|
||||
if data != nil {
|
||||
dataPtr = unsafe.Pointer(&data[0])
|
||||
}
|
||||
var err C.cl_int
|
||||
clBuffer := C.clCreateImage(ctx.clContext, C.cl_mem_flags(flags), &format, &desc, dataPtr, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if clBuffer == nil {
|
||||
return nil, ErrUnknown
|
||||
}
|
||||
return newMemObject(clBuffer, len(data)), nil
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateImageSimple(flags MemFlag, width, height int, channelOrder ChannelOrder, channelDataType ChannelDataType, data []byte) (*MemObject, error) {
|
||||
format := ImageFormat{channelOrder, channelDataType}
|
||||
desc := ImageDescription{
|
||||
Type: MemObjectTypeImage2D,
|
||||
Width: width,
|
||||
Height: height,
|
||||
}
|
||||
return ctx.CreateImage(flags, format, desc, data)
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateImageFromImage(flags MemFlag, img image.Image) (*MemObject, error) {
|
||||
switch m := img.(type) {
|
||||
case *image.Gray:
|
||||
format := ImageFormat{ChannelOrderIntensity, ChannelDataTypeUNormInt8}
|
||||
desc := ImageDescription{
|
||||
Type: MemObjectTypeImage2D,
|
||||
Width: m.Bounds().Dx(),
|
||||
Height: m.Bounds().Dy(),
|
||||
RowPitch: m.Stride,
|
||||
}
|
||||
return ctx.CreateImage(flags, format, desc, m.Pix)
|
||||
case *image.RGBA:
|
||||
format := ImageFormat{ChannelOrderRGBA, ChannelDataTypeUNormInt8}
|
||||
desc := ImageDescription{
|
||||
Type: MemObjectTypeImage2D,
|
||||
Width: m.Bounds().Dx(),
|
||||
Height: m.Bounds().Dy(),
|
||||
RowPitch: m.Stride,
|
||||
}
|
||||
return ctx.CreateImage(flags, format, desc, m.Pix)
|
||||
}
|
||||
|
||||
b := img.Bounds()
|
||||
w := b.Dx()
|
||||
h := b.Dy()
|
||||
data := make([]byte, w*h*4)
|
||||
dataOffset := 0
|
||||
for y := 0; y < h; y++ {
|
||||
for x := 0; x < w; x++ {
|
||||
c := img.At(x+b.Min.X, y+b.Min.Y)
|
||||
r, g, b, a := c.RGBA()
|
||||
data[dataOffset] = uint8(r >> 8)
|
||||
data[dataOffset+1] = uint8(g >> 8)
|
||||
data[dataOffset+2] = uint8(b >> 8)
|
||||
data[dataOffset+3] = uint8(a >> 8)
|
||||
dataOffset += 4
|
||||
}
|
||||
}
|
||||
return ctx.CreateImageSimple(flags, w, h, ChannelOrderRGBA, ChannelDataTypeUNormInt8, data)
|
||||
}
|
|
@ -1,127 +0,0 @@
|
|||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type ErrUnsupportedArgumentType struct {
|
||||
Index int
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
func (e ErrUnsupportedArgumentType) Error() string {
|
||||
return fmt.Sprintf("cl: unsupported argument type for index %d: %+v", e.Index, e.Value)
|
||||
}
|
||||
|
||||
type Kernel struct {
|
||||
clKernel C.cl_kernel
|
||||
name string
|
||||
}
|
||||
|
||||
type LocalBuffer int
|
||||
|
||||
func releaseKernel(k *Kernel) {
|
||||
if k.clKernel != nil {
|
||||
C.clReleaseKernel(k.clKernel)
|
||||
k.clKernel = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kernel) Release() {
|
||||
releaseKernel(k)
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgs(args ...interface{}) error {
|
||||
for index, arg := range args {
|
||||
if err := k.SetArg(index, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArg(index int, arg interface{}) error {
|
||||
switch val := arg.(type) {
|
||||
case uint8:
|
||||
return k.SetArgUint8(index, val)
|
||||
case int8:
|
||||
return k.SetArgInt8(index, val)
|
||||
case uint32:
|
||||
return k.SetArgUint32(index, val)
|
||||
case uint64:
|
||||
return k.SetArgUint64(index, val)
|
||||
case int32:
|
||||
return k.SetArgInt32(index, val)
|
||||
case float32:
|
||||
return k.SetArgFloat32(index, val)
|
||||
case *MemObject:
|
||||
return k.SetArgBuffer(index, val)
|
||||
case LocalBuffer:
|
||||
return k.SetArgLocal(index, int(val))
|
||||
default:
|
||||
return ErrUnsupportedArgumentType{Index: index, Value: arg}
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgBuffer(index int, buffer *MemObject) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(buffer.clMem)), unsafe.Pointer(&buffer.clMem))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgFloat32(index int, val float32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgInt8(index int, val int8) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint8(index int, val uint8) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgInt32(index int, val int32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint32(index int, val uint32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint64(index int, val uint64) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgLocal(index int, size int) error {
|
||||
return k.SetArgUnsafe(index, size, nil)
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUnsafe(index, argSize int, arg unsafe.Pointer) error {
|
||||
//fmt.Println("FUNKY: ", index, argSize)
|
||||
return toError(C.clSetKernelArg(k.clKernel, C.cl_uint(index), C.size_t(argSize), arg))
|
||||
}
|
||||
|
||||
func (k *Kernel) PreferredWorkGroupSizeMultiple(device *Device) (int, error) {
|
||||
var size C.size_t
|
||||
err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
|
||||
return int(size), toError(err)
|
||||
}
|
||||
|
||||
func (k *Kernel) WorkGroupSize(device *Device) (int, error) {
|
||||
var size C.size_t
|
||||
err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_WORK_GROUP_SIZE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
|
||||
return int(size), toError(err)
|
||||
}
|
||||
|
||||
func (k *Kernel) NumArgs() (int, error) {
|
||||
var num C.cl_uint
|
||||
err := C.clGetKernelInfo(k.clKernel, C.CL_KERNEL_NUM_ARGS, C.size_t(unsafe.Sizeof(num)), unsafe.Pointer(&num), nil)
|
||||
return int(num), toError(err)
|
||||
}
|
|
@@ -1,7 +0,0 @@
// +build !cl12

package cl

func (k *Kernel) ArgName(index int) (string, error) {
	return "", ErrUnsupported
}
@@ -1,20 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

func (k *Kernel) ArgName(index int) (string, error) {
	var strC [1024]byte
	var strN C.size_t
	if err := C.clGetKernelArgInfo(k.clKernel, C.cl_uint(index), C.CL_KERNEL_ARG_NAME, 1024, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
		return "", toError(err)
	}
	return string(strC[:strN]), nil
}
@@ -1,83 +0,0 @@
package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import "unsafe"

const maxPlatforms = 32

type Platform struct {
	id C.cl_platform_id
}

// Obtain the list of platforms available.
func GetPlatforms() ([]*Platform, error) {
	var platformIds [maxPlatforms]C.cl_platform_id
	var nPlatforms C.cl_uint
	if err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms); err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	platforms := make([]*Platform, nPlatforms)
	for i := 0; i < int(nPlatforms); i++ {
		platforms[i] = &Platform{id: platformIds[i]}
	}
	return platforms, nil
}

func (p *Platform) GetDevices(deviceType DeviceType) ([]*Device, error) {
	return GetDevices(p, deviceType)
}

func (p *Platform) getInfoString(param C.cl_platform_info) (string, error) {
	var strC [2048]byte
	var strN C.size_t
	if err := C.clGetPlatformInfo(p.id, param, 2048, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
		return "", toError(err)
	}
	return string(strC[:(strN - 1)]), nil
}

func (p *Platform) Name() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_NAME); err != nil {
		panic("Platform.Name() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Vendor() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_VENDOR); err != nil {
		panic("Platform.Vendor() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Profile() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_PROFILE); err != nil {
		panic("Platform.Profile() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Version() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_VERSION); err != nil {
		panic("Platform.Version() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Extensions() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_EXTENSIONS); err != nil {
		panic("Platform.Extensions() should never fail")
	} else {
		return str
	}
}
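For orientation, a minimal sketch of how the platform API removed above was typically driven. It is illustrative only and not part of the diff; the import path github.com/Gustav-Simonsson/go-opencl/cl is the old vendored location, and DeviceTypeGPU / Device.Name come from parts of the package not shown in this hunk.

package main

import (
	"fmt"
	"log"

	cl "github.com/Gustav-Simonsson/go-opencl/cl" // removed vendored package (assumed path)
)

func main() {
	// Enumerate every OpenCL platform and list its GPU devices.
	platforms, err := cl.GetPlatforms()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range platforms {
		fmt.Println("platform:", p.Name(), p.Version())
		devices, err := p.GetDevices(cl.DeviceTypeGPU) // DeviceTypeGPU assumed from the wider package
		if err != nil {
			continue
		}
		for _, d := range devices {
			fmt.Println("  device:", d.Name()) // Device.Name assumed from the wider package
		}
	}
}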
@@ -1,105 +0,0 @@
package cl

// #include <stdlib.h>
// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import (
	"fmt"
	"runtime"
	"unsafe"
)

type BuildError struct {
	Message string
	Device  *Device
}

func (e BuildError) Error() string {
	if e.Device != nil {
		return fmt.Sprintf("cl: build error on %q: %s", e.Device.Name(), e.Message)
	} else {
		return fmt.Sprintf("cl: build error: %s", e.Message)
	}
}

type Program struct {
	clProgram C.cl_program
	devices   []*Device
}

func releaseProgram(p *Program) {
	if p.clProgram != nil {
		C.clReleaseProgram(p.clProgram)
		p.clProgram = nil
	}
}

func (p *Program) Release() {
	releaseProgram(p)
}

func (p *Program) BuildProgram(devices []*Device, options string) error {
	var cOptions *C.char
	if options != "" {
		cOptions = C.CString(options)
		defer C.free(unsafe.Pointer(cOptions))
	}
	var deviceList []C.cl_device_id
	var deviceListPtr *C.cl_device_id
	numDevices := C.cl_uint(len(devices))
	if devices != nil && len(devices) > 0 {
		deviceList = buildDeviceIdList(devices)
		deviceListPtr = &deviceList[0]
	}
	if err := C.clBuildProgram(p.clProgram, numDevices, deviceListPtr, cOptions, nil, nil); err != C.CL_SUCCESS {
		buffer := make([]byte, 4096)
		var bLen C.size_t
		var err C.cl_int

		for _, dev := range p.devices {
			for i := 2; i >= 0; i-- {
				err = C.clGetProgramBuildInfo(p.clProgram, dev.id, C.CL_PROGRAM_BUILD_LOG, C.size_t(len(buffer)), unsafe.Pointer(&buffer[0]), &bLen)
				if err == C.CL_INVALID_VALUE && i > 0 && bLen < 1024*1024 {
					// INVALID_VALUE probably means our buffer isn't large enough
					buffer = make([]byte, bLen)
				} else {
					break
				}
			}
			if err != C.CL_SUCCESS {
				return toError(err)
			}

			if bLen > 1 {
				return BuildError{
					Device:  dev,
					Message: string(buffer[:bLen-1]),
				}
			}
		}

		return BuildError{
			Device:  nil,
			Message: "build failed and produced no log entries",
		}
	}
	return nil
}

func (p *Program) CreateKernel(name string) (*Kernel, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	var err C.cl_int
	clKernel := C.clCreateKernel(p.clProgram, cName, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	kernel := &Kernel{clKernel: clKernel, name: name}
	runtime.SetFinalizer(kernel, releaseKernel)
	return kernel, nil
}
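Likewise, a hedged sketch of the removed program/kernel flow. BuildProgram, CreateKernel, SetArgs, EnqueueNDRangeKernel and Finish are the calls visible in these removed files; CreateProgramWithSource and the Context/CommandQueue/MemObject setup are assumptions about the rest of the package.

package main

import (
	"log"

	cl "github.com/Gustav-Simonsson/go-opencl/cl" // removed vendored package (assumed path)
)

// runSquare builds a program from source and runs one kernel over n work items.
// ctx, queue, input and output are assumed to have been created with the rest
// of the removed package.
func runSquare(ctx *cl.Context, queue *cl.CommandQueue, source string, input, output *cl.MemObject, n int) {
	program, err := ctx.CreateProgramWithSource([]string{source}) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	defer program.Release()
	if err := program.BuildProgram(nil, ""); err != nil {
		log.Fatal(err) // a BuildError carries the per-device build log
	}
	kernel, err := program.CreateKernel("square")
	if err != nil {
		log.Fatal(err)
	}
	defer kernel.Release()
	if err := kernel.SetArgs(input, output, uint32(n)); err != nil {
		log.Fatal(err)
	}
	// Enqueue over a 1-D global range and wait for completion.
	if _, err := queue.EnqueueNDRangeKernel(kernel, nil, []int{n}, nil, nil); err != nil {
		log.Fatal(err)
	}
	if err := queue.Finish(); err != nil {
		log.Fatal(err)
	}
}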
@ -1,193 +0,0 @@
|
|||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type CommandQueueProperty int
|
||||
|
||||
const (
|
||||
CommandQueueOutOfOrderExecModeEnable CommandQueueProperty = C.CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE
|
||||
CommandQueueProfilingEnable CommandQueueProperty = C.CL_QUEUE_PROFILING_ENABLE
|
||||
)
|
||||
|
||||
type CommandQueue struct {
|
||||
clQueue C.cl_command_queue
|
||||
device *Device
|
||||
}
|
||||
|
||||
func releaseCommandQueue(q *CommandQueue) {
|
||||
if q.clQueue != nil {
|
||||
C.clReleaseCommandQueue(q.clQueue)
|
||||
q.clQueue = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Call clReleaseCommandQueue on the CommandQueue. Using the CommandQueue after Release will cause a panick.
|
||||
func (q *CommandQueue) Release() {
|
||||
releaseCommandQueue(q)
|
||||
}
|
||||
|
||||
// Blocks until all previously queued OpenCL commands in a command-queue are issued to the associated device and have completed.
|
||||
func (q *CommandQueue) Finish() error {
|
||||
return toError(C.clFinish(q.clQueue))
|
||||
}
|
||||
|
||||
// Issues all previously queued OpenCL commands in a command-queue to the device associated with the command-queue.
|
||||
func (q *CommandQueue) Flush() error {
|
||||
return toError(C.clFlush(q.clQueue))
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of the buffer object given by buffer into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapBuffer(buffer *MemObject, blocking bool, flags MapFlag, offset, size int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
ptr := C.clEnqueueMapBuffer(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
return &MappedMemObject{ptr: ptr, size: size}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of an image object into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapImage(buffer *MemObject, blocking bool, flags MapFlag, origin, region [3]int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
var rowPitch, slicePitch C.size_t
|
||||
ptr := C.clEnqueueMapImage(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), &cOrigin[0], &cRegion[0], &rowPitch, &slicePitch, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
size := 0 // TODO: could calculate this
|
||||
return &MappedMemObject{ptr: ptr, size: size, rowPitch: int(rowPitch), slicePitch: int(slicePitch)}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to unmap a previously mapped region of a memory object.
|
||||
func (q *CommandQueue) EnqueueUnmapMemObject(buffer *MemObject, mappedObj *MappedMemObject, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
if err := C.clEnqueueUnmapMemObject(q.clQueue, buffer.clMem, mappedObj.ptr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
return newEvent(event), nil
|
||||
}
|
||||
|
||||
// Enqueues a command to copy a buffer object to another buffer object.
|
||||
func (q *CommandQueue) EnqueueCopyBuffer(srcBuffer, dstBuffer *MemObject, srcOffset, dstOffset, byteCount int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueCopyBuffer(q.clQueue, srcBuffer.clMem, dstBuffer.clMem, C.size_t(srcOffset), C.size_t(dstOffset), C.size_t(byteCount), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueue commands to write to a buffer object from host memory.
|
||||
func (q *CommandQueue) EnqueueWriteBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueWriteBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueWriteBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
||||
|
||||
// Enqueue commands to read from a buffer object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueReadBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueReadBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
||||
|
||||
// Enqueues a command to execute a kernel on a device.
|
||||
func (q *CommandQueue) EnqueueNDRangeKernel(kernel *Kernel, globalWorkOffset, globalWorkSize, localWorkSize []int, eventWaitList []*Event) (*Event, error) {
|
||||
workDim := len(globalWorkSize)
|
||||
var globalWorkOffsetList []C.size_t
|
||||
var globalWorkOffsetPtr *C.size_t
|
||||
if globalWorkOffset != nil {
|
||||
globalWorkOffsetList = make([]C.size_t, len(globalWorkOffset))
|
||||
for i, off := range globalWorkOffset {
|
||||
globalWorkOffsetList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkOffsetPtr = &globalWorkOffsetList[0]
|
||||
}
|
||||
var globalWorkSizeList []C.size_t
|
||||
var globalWorkSizePtr *C.size_t
|
||||
if globalWorkSize != nil {
|
||||
globalWorkSizeList = make([]C.size_t, len(globalWorkSize))
|
||||
for i, off := range globalWorkSize {
|
||||
globalWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkSizePtr = &globalWorkSizeList[0]
|
||||
}
|
||||
var localWorkSizeList []C.size_t
|
||||
var localWorkSizePtr *C.size_t
|
||||
if localWorkSize != nil {
|
||||
localWorkSizeList = make([]C.size_t, len(localWorkSize))
|
||||
for i, off := range localWorkSize {
|
||||
localWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
localWorkSizePtr = &localWorkSizeList[0]
|
||||
}
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueNDRangeKernel(q.clQueue, kernel.clKernel, C.cl_uint(workDim), globalWorkOffsetPtr, globalWorkSizePtr, localWorkSizePtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a command to read from a 2D or 3D image object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a command to write from a 2D or 3D image object to host memory.
|
||||
func (q *CommandQueue) EnqueueWriteImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueFillBuffer(buffer *MemObject, pattern unsafe.Pointer, patternSize, offset, size int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueFillBuffer(q.clQueue, buffer.clMem, pattern, C.size_t(patternSize), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// A synchronization point that enqueues a barrier operation.
|
||||
func (q *CommandQueue) EnqueueBarrierWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueBarrierWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a marker command which waits for either a list of events to complete, or all previously enqueued commands to complete.
|
||||
func (q *CommandQueue) EnqueueMarkerWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueMarkerWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
|
@ -1,487 +0,0 @@
|
|||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnknown = errors.New("cl: unknown error") // Generally an unexpected result from an OpenCL function (e.g. CL_SUCCESS but null pointer)
|
||||
)
|
||||
|
||||
type ErrOther int
|
||||
|
||||
func (e ErrOther) Error() string {
|
||||
return fmt.Sprintf("cl: error %d", int(e))
|
||||
}
|
||||
|
||||
var (
|
||||
ErrDeviceNotFound = errors.New("cl: Device Not Found")
|
||||
ErrDeviceNotAvailable = errors.New("cl: Device Not Available")
|
||||
ErrCompilerNotAvailable = errors.New("cl: Compiler Not Available")
|
||||
ErrMemObjectAllocationFailure = errors.New("cl: Mem Object Allocation Failure")
|
||||
ErrOutOfResources = errors.New("cl: Out Of Resources")
|
||||
ErrOutOfHostMemory = errors.New("cl: Out Of Host Memory")
|
||||
ErrProfilingInfoNotAvailable = errors.New("cl: Profiling Info Not Available")
|
||||
ErrMemCopyOverlap = errors.New("cl: Mem Copy Overlap")
|
||||
ErrImageFormatMismatch = errors.New("cl: Image Format Mismatch")
|
||||
ErrImageFormatNotSupported = errors.New("cl: Image Format Not Supported")
|
||||
ErrBuildProgramFailure = errors.New("cl: Build Program Failure")
|
||||
ErrMapFailure = errors.New("cl: Map Failure")
|
||||
ErrMisalignedSubBufferOffset = errors.New("cl: Misaligned Sub Buffer Offset")
|
||||
ErrExecStatusErrorForEventsInWaitList = errors.New("cl: Exec Status Error For Events In Wait List")
|
||||
ErrCompileProgramFailure = errors.New("cl: Compile Program Failure")
|
||||
ErrLinkerNotAvailable = errors.New("cl: Linker Not Available")
|
||||
ErrLinkProgramFailure = errors.New("cl: Link Program Failure")
|
||||
ErrDevicePartitionFailed = errors.New("cl: Device Partition Failed")
|
||||
ErrKernelArgInfoNotAvailable = errors.New("cl: Kernel Arg Info Not Available")
|
||||
ErrInvalidValue = errors.New("cl: Invalid Value")
|
||||
ErrInvalidDeviceType = errors.New("cl: Invalid Device Type")
|
||||
ErrInvalidPlatform = errors.New("cl: Invalid Platform")
|
||||
ErrInvalidDevice = errors.New("cl: Invalid Device")
|
||||
ErrInvalidContext = errors.New("cl: Invalid Context")
|
||||
ErrInvalidQueueProperties = errors.New("cl: Invalid Queue Properties")
|
||||
ErrInvalidCommandQueue = errors.New("cl: Invalid Command Queue")
|
||||
ErrInvalidHostPtr = errors.New("cl: Invalid Host Ptr")
|
||||
ErrInvalidMemObject = errors.New("cl: Invalid Mem Object")
|
||||
ErrInvalidImageFormatDescriptor = errors.New("cl: Invalid Image Format Descriptor")
|
||||
ErrInvalidImageSize = errors.New("cl: Invalid Image Size")
|
||||
ErrInvalidSampler = errors.New("cl: Invalid Sampler")
|
||||
ErrInvalidBinary = errors.New("cl: Invalid Binary")
|
||||
ErrInvalidBuildOptions = errors.New("cl: Invalid Build Options")
|
||||
ErrInvalidProgram = errors.New("cl: Invalid Program")
|
||||
ErrInvalidProgramExecutable = errors.New("cl: Invalid Program Executable")
|
||||
ErrInvalidKernelName = errors.New("cl: Invalid Kernel Name")
|
||||
ErrInvalidKernelDefinition = errors.New("cl: Invalid Kernel Definition")
|
||||
ErrInvalidKernel = errors.New("cl: Invalid Kernel")
|
||||
ErrInvalidArgIndex = errors.New("cl: Invalid Arg Index")
|
||||
ErrInvalidArgValue = errors.New("cl: Invalid Arg Value")
|
||||
ErrInvalidArgSize = errors.New("cl: Invalid Arg Size")
|
||||
ErrInvalidKernelArgs = errors.New("cl: Invalid Kernel Args")
|
||||
ErrInvalidWorkDimension = errors.New("cl: Invalid Work Dimension")
|
||||
ErrInvalidWorkGroupSize = errors.New("cl: Invalid Work Group Size")
|
||||
ErrInvalidWorkItemSize = errors.New("cl: Invalid Work Item Size")
|
||||
ErrInvalidGlobalOffset = errors.New("cl: Invalid Global Offset")
|
||||
ErrInvalidEventWaitList = errors.New("cl: Invalid Event Wait List")
|
||||
ErrInvalidEvent = errors.New("cl: Invalid Event")
|
||||
ErrInvalidOperation = errors.New("cl: Invalid Operation")
|
||||
ErrInvalidGlObject = errors.New("cl: Invalid Gl Object")
|
||||
ErrInvalidBufferSize = errors.New("cl: Invalid Buffer Size")
|
||||
ErrInvalidMipLevel = errors.New("cl: Invalid Mip Level")
|
||||
ErrInvalidGlobalWorkSize = errors.New("cl: Invalid Global Work Size")
|
||||
ErrInvalidProperty = errors.New("cl: Invalid Property")
|
||||
ErrInvalidImageDescriptor = errors.New("cl: Invalid Image Descriptor")
|
||||
ErrInvalidCompilerOptions = errors.New("cl: Invalid Compiler Options")
|
||||
ErrInvalidLinkerOptions = errors.New("cl: Invalid Linker Options")
|
||||
ErrInvalidDevicePartitionCount = errors.New("cl: Invalid Device Partition Count")
|
||||
)
|
||||
var errorMap = map[C.cl_int]error{
|
||||
C.CL_SUCCESS: nil,
|
||||
C.CL_DEVICE_NOT_FOUND: ErrDeviceNotFound,
|
||||
C.CL_DEVICE_NOT_AVAILABLE: ErrDeviceNotAvailable,
|
||||
C.CL_COMPILER_NOT_AVAILABLE: ErrCompilerNotAvailable,
|
||||
C.CL_MEM_OBJECT_ALLOCATION_FAILURE: ErrMemObjectAllocationFailure,
|
||||
C.CL_OUT_OF_RESOURCES: ErrOutOfResources,
|
||||
C.CL_OUT_OF_HOST_MEMORY: ErrOutOfHostMemory,
|
||||
C.CL_PROFILING_INFO_NOT_AVAILABLE: ErrProfilingInfoNotAvailable,
|
||||
C.CL_MEM_COPY_OVERLAP: ErrMemCopyOverlap,
|
||||
C.CL_IMAGE_FORMAT_MISMATCH: ErrImageFormatMismatch,
|
||||
C.CL_IMAGE_FORMAT_NOT_SUPPORTED: ErrImageFormatNotSupported,
|
||||
C.CL_BUILD_PROGRAM_FAILURE: ErrBuildProgramFailure,
|
||||
C.CL_MAP_FAILURE: ErrMapFailure,
|
||||
C.CL_MISALIGNED_SUB_BUFFER_OFFSET: ErrMisalignedSubBufferOffset,
|
||||
C.CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST: ErrExecStatusErrorForEventsInWaitList,
|
||||
C.CL_INVALID_VALUE: ErrInvalidValue,
|
||||
C.CL_INVALID_DEVICE_TYPE: ErrInvalidDeviceType,
|
||||
C.CL_INVALID_PLATFORM: ErrInvalidPlatform,
|
||||
C.CL_INVALID_DEVICE: ErrInvalidDevice,
|
||||
C.CL_INVALID_CONTEXT: ErrInvalidContext,
|
||||
C.CL_INVALID_QUEUE_PROPERTIES: ErrInvalidQueueProperties,
|
||||
C.CL_INVALID_COMMAND_QUEUE: ErrInvalidCommandQueue,
|
||||
C.CL_INVALID_HOST_PTR: ErrInvalidHostPtr,
|
||||
C.CL_INVALID_MEM_OBJECT: ErrInvalidMemObject,
|
||||
C.CL_INVALID_IMAGE_FORMAT_DESCRIPTOR: ErrInvalidImageFormatDescriptor,
|
||||
C.CL_INVALID_IMAGE_SIZE: ErrInvalidImageSize,
|
||||
C.CL_INVALID_SAMPLER: ErrInvalidSampler,
|
||||
C.CL_INVALID_BINARY: ErrInvalidBinary,
|
||||
C.CL_INVALID_BUILD_OPTIONS: ErrInvalidBuildOptions,
|
||||
C.CL_INVALID_PROGRAM: ErrInvalidProgram,
|
||||
C.CL_INVALID_PROGRAM_EXECUTABLE: ErrInvalidProgramExecutable,
|
||||
C.CL_INVALID_KERNEL_NAME: ErrInvalidKernelName,
|
||||
C.CL_INVALID_KERNEL_DEFINITION: ErrInvalidKernelDefinition,
|
||||
C.CL_INVALID_KERNEL: ErrInvalidKernel,
|
||||
C.CL_INVALID_ARG_INDEX: ErrInvalidArgIndex,
|
||||
C.CL_INVALID_ARG_VALUE: ErrInvalidArgValue,
|
||||
C.CL_INVALID_ARG_SIZE: ErrInvalidArgSize,
|
||||
C.CL_INVALID_KERNEL_ARGS: ErrInvalidKernelArgs,
|
||||
C.CL_INVALID_WORK_DIMENSION: ErrInvalidWorkDimension,
|
||||
C.CL_INVALID_WORK_GROUP_SIZE: ErrInvalidWorkGroupSize,
|
||||
C.CL_INVALID_WORK_ITEM_SIZE: ErrInvalidWorkItemSize,
|
||||
C.CL_INVALID_GLOBAL_OFFSET: ErrInvalidGlobalOffset,
|
||||
C.CL_INVALID_EVENT_WAIT_LIST: ErrInvalidEventWaitList,
|
||||
C.CL_INVALID_EVENT: ErrInvalidEvent,
|
||||
C.CL_INVALID_OPERATION: ErrInvalidOperation,
|
||||
C.CL_INVALID_GL_OBJECT: ErrInvalidGlObject,
|
||||
C.CL_INVALID_BUFFER_SIZE: ErrInvalidBufferSize,
|
||||
C.CL_INVALID_MIP_LEVEL: ErrInvalidMipLevel,
|
||||
C.CL_INVALID_GLOBAL_WORK_SIZE: ErrInvalidGlobalWorkSize,
|
||||
C.CL_INVALID_PROPERTY: ErrInvalidProperty,
|
||||
}
|
||||
|
||||
func toError(code C.cl_int) error {
|
||||
if err, ok := errorMap[code]; ok {
|
||||
return err
|
||||
}
|
||||
return ErrOther(code)
|
||||
}
|
||||
|
||||
type LocalMemType int
|
||||
|
||||
const (
|
||||
LocalMemTypeNone LocalMemType = C.CL_NONE
|
||||
LocalMemTypeGlobal LocalMemType = C.CL_GLOBAL
|
||||
LocalMemTypeLocal LocalMemType = C.CL_LOCAL
|
||||
)
|
||||
|
||||
var localMemTypeMap = map[LocalMemType]string{
|
||||
LocalMemTypeNone: "None",
|
||||
LocalMemTypeGlobal: "Global",
|
||||
LocalMemTypeLocal: "Local",
|
||||
}
|
||||
|
||||
func (t LocalMemType) String() string {
|
||||
name := localMemTypeMap[t]
|
||||
if name == "" {
|
||||
name = "Unknown"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ExecCapability int
|
||||
|
||||
const (
|
||||
ExecCapabilityKernel ExecCapability = C.CL_EXEC_KERNEL // The OpenCL device can execute OpenCL kernels.
|
||||
ExecCapabilityNativeKernel ExecCapability = C.CL_EXEC_NATIVE_KERNEL // The OpenCL device can execute native kernels.
|
||||
)
|
||||
|
||||
func (ec ExecCapability) String() string {
|
||||
var parts []string
|
||||
if ec&ExecCapabilityKernel != 0 {
|
||||
parts = append(parts, "Kernel")
|
||||
}
|
||||
if ec&ExecCapabilityNativeKernel != 0 {
|
||||
parts = append(parts, "NativeKernel")
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type MemCacheType int
|
||||
|
||||
const (
|
||||
MemCacheTypeNone MemCacheType = C.CL_NONE
|
||||
MemCacheTypeReadOnlyCache MemCacheType = C.CL_READ_ONLY_CACHE
|
||||
MemCacheTypeReadWriteCache MemCacheType = C.CL_READ_WRITE_CACHE
|
||||
)
|
||||
|
||||
func (ct MemCacheType) String() string {
|
||||
switch ct {
|
||||
case MemCacheTypeNone:
|
||||
return "None"
|
||||
case MemCacheTypeReadOnlyCache:
|
||||
return "ReadOnly"
|
||||
case MemCacheTypeReadWriteCache:
|
||||
return "ReadWrite"
|
||||
}
|
||||
return fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
|
||||
type MemFlag int
|
||||
|
||||
const (
|
||||
MemReadWrite MemFlag = C.CL_MEM_READ_WRITE
|
||||
MemWriteOnly MemFlag = C.CL_MEM_WRITE_ONLY
|
||||
MemReadOnly MemFlag = C.CL_MEM_READ_ONLY
|
||||
MemUseHostPtr MemFlag = C.CL_MEM_USE_HOST_PTR
|
||||
MemAllocHostPtr MemFlag = C.CL_MEM_ALLOC_HOST_PTR
|
||||
MemCopyHostPtr MemFlag = C.CL_MEM_COPY_HOST_PTR
|
||||
|
||||
MemWriteOnlyHost MemFlag = C.CL_MEM_HOST_WRITE_ONLY
|
||||
MemReadOnlyHost MemFlag = C.CL_MEM_HOST_READ_ONLY
|
||||
MemNoAccessHost MemFlag = C.CL_MEM_HOST_NO_ACCESS
|
||||
)
|
||||
|
||||
type MemObjectType int
|
||||
|
||||
const (
|
||||
MemObjectTypeBuffer MemObjectType = C.CL_MEM_OBJECT_BUFFER
|
||||
MemObjectTypeImage2D MemObjectType = C.CL_MEM_OBJECT_IMAGE2D
|
||||
MemObjectTypeImage3D MemObjectType = C.CL_MEM_OBJECT_IMAGE3D
|
||||
)
|
||||
|
||||
type MapFlag int
|
||||
|
||||
const (
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for reading.
|
||||
MapFlagRead MapFlag = C.CL_MAP_READ
|
||||
MapFlagWrite MapFlag = C.CL_MAP_WRITE
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func (mf MapFlag) toCl() C.cl_map_flags {
|
||||
return C.cl_map_flags(mf)
|
||||
}
|
||||
|
||||
type ChannelOrder int
|
||||
|
||||
const (
|
||||
ChannelOrderR ChannelOrder = C.CL_R
|
||||
ChannelOrderA ChannelOrder = C.CL_A
|
||||
ChannelOrderRG ChannelOrder = C.CL_RG
|
||||
ChannelOrderRA ChannelOrder = C.CL_RA
|
||||
ChannelOrderRGB ChannelOrder = C.CL_RGB
|
||||
ChannelOrderRGBA ChannelOrder = C.CL_RGBA
|
||||
ChannelOrderBGRA ChannelOrder = C.CL_BGRA
|
||||
ChannelOrderARGB ChannelOrder = C.CL_ARGB
|
||||
ChannelOrderIntensity ChannelOrder = C.CL_INTENSITY
|
||||
ChannelOrderLuminance ChannelOrder = C.CL_LUMINANCE
|
||||
ChannelOrderRx ChannelOrder = C.CL_Rx
|
||||
ChannelOrderRGx ChannelOrder = C.CL_RGx
|
||||
ChannelOrderRGBx ChannelOrder = C.CL_RGBx
|
||||
)
|
||||
|
||||
var channelOrderNameMap = map[ChannelOrder]string{
|
||||
ChannelOrderR: "R",
|
||||
ChannelOrderA: "A",
|
||||
ChannelOrderRG: "RG",
|
||||
ChannelOrderRA: "RA",
|
||||
ChannelOrderRGB: "RGB",
|
||||
ChannelOrderRGBA: "RGBA",
|
||||
ChannelOrderBGRA: "BGRA",
|
||||
ChannelOrderARGB: "ARGB",
|
||||
ChannelOrderIntensity: "Intensity",
|
||||
ChannelOrderLuminance: "Luminance",
|
||||
ChannelOrderRx: "Rx",
|
||||
ChannelOrderRGx: "RGx",
|
||||
ChannelOrderRGBx: "RGBx",
|
||||
}
|
||||
|
||||
func (co ChannelOrder) String() string {
|
||||
name := channelOrderNameMap[co]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(co))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ChannelDataType int
|
||||
|
||||
const (
|
||||
ChannelDataTypeSNormInt8 ChannelDataType = C.CL_SNORM_INT8
|
||||
ChannelDataTypeSNormInt16 ChannelDataType = C.CL_SNORM_INT16
|
||||
ChannelDataTypeUNormInt8 ChannelDataType = C.CL_UNORM_INT8
|
||||
ChannelDataTypeUNormInt16 ChannelDataType = C.CL_UNORM_INT16
|
||||
ChannelDataTypeUNormShort565 ChannelDataType = C.CL_UNORM_SHORT_565
|
||||
ChannelDataTypeUNormShort555 ChannelDataType = C.CL_UNORM_SHORT_555
|
||||
ChannelDataTypeUNormInt101010 ChannelDataType = C.CL_UNORM_INT_101010
|
||||
ChannelDataTypeSignedInt8 ChannelDataType = C.CL_SIGNED_INT8
|
||||
ChannelDataTypeSignedInt16 ChannelDataType = C.CL_SIGNED_INT16
|
||||
ChannelDataTypeSignedInt32 ChannelDataType = C.CL_SIGNED_INT32
|
||||
ChannelDataTypeUnsignedInt8 ChannelDataType = C.CL_UNSIGNED_INT8
|
||||
ChannelDataTypeUnsignedInt16 ChannelDataType = C.CL_UNSIGNED_INT16
|
||||
ChannelDataTypeUnsignedInt32 ChannelDataType = C.CL_UNSIGNED_INT32
|
||||
ChannelDataTypeHalfFloat ChannelDataType = C.CL_HALF_FLOAT
|
||||
ChannelDataTypeFloat ChannelDataType = C.CL_FLOAT
|
||||
)
|
||||
|
||||
var channelDataTypeNameMap = map[ChannelDataType]string{
|
||||
ChannelDataTypeSNormInt8: "SNormInt8",
|
||||
ChannelDataTypeSNormInt16: "SNormInt16",
|
||||
ChannelDataTypeUNormInt8: "UNormInt8",
|
||||
ChannelDataTypeUNormInt16: "UNormInt16",
|
||||
ChannelDataTypeUNormShort565: "UNormShort565",
|
||||
ChannelDataTypeUNormShort555: "UNormShort555",
|
||||
ChannelDataTypeUNormInt101010: "UNormInt101010",
|
||||
ChannelDataTypeSignedInt8: "SignedInt8",
|
||||
ChannelDataTypeSignedInt16: "SignedInt16",
|
||||
ChannelDataTypeSignedInt32: "SignedInt32",
|
||||
ChannelDataTypeUnsignedInt8: "UnsignedInt8",
|
||||
ChannelDataTypeUnsignedInt16: "UnsignedInt16",
|
||||
ChannelDataTypeUnsignedInt32: "UnsignedInt32",
|
||||
ChannelDataTypeHalfFloat: "HalfFloat",
|
||||
ChannelDataTypeFloat: "Float",
|
||||
}
|
||||
|
||||
func (ct ChannelDataType) String() string {
|
||||
name := channelDataTypeNameMap[ct]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ImageFormat struct {
|
||||
ChannelOrder ChannelOrder
|
||||
ChannelDataType ChannelDataType
|
||||
}
|
||||
|
||||
func (f ImageFormat) toCl() C.cl_image_format {
|
||||
var format C.cl_image_format
|
||||
format.image_channel_order = C.cl_channel_order(f.ChannelOrder)
|
||||
format.image_channel_data_type = C.cl_channel_type(f.ChannelDataType)
|
||||
return format
|
||||
}
|
||||
|
||||
type ProfilingInfo int
|
||||
|
||||
const (
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event is enqueued in
|
||||
// a command-queue by the host.
|
||||
ProfilingInfoCommandQueued ProfilingInfo = C.CL_PROFILING_COMMAND_QUEUED
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event that has been
|
||||
// enqueued is submitted by the host to the device associated with the command-queue.
|
||||
ProfilingInfoCommandSubmit ProfilingInfo = C.CL_PROFILING_COMMAND_SUBMIT
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event starts execution on the device.
|
||||
ProfilingInfoCommandStart ProfilingInfo = C.CL_PROFILING_COMMAND_START
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event has finished
|
||||
// execution on the device.
|
||||
ProfilingInfoCommandEnd ProfilingInfo = C.CL_PROFILING_COMMAND_END
|
||||
)
|
||||
|
||||
type CommmandExecStatus int
|
||||
|
||||
const (
|
||||
CommmandExecStatusComplete CommmandExecStatus = C.CL_COMPLETE
|
||||
CommmandExecStatusRunning CommmandExecStatus = C.CL_RUNNING
|
||||
CommmandExecStatusSubmitted CommmandExecStatus = C.CL_SUBMITTED
|
||||
CommmandExecStatusQueued CommmandExecStatus = C.CL_QUEUED
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
clEvent C.cl_event
|
||||
}
|
||||
|
||||
func releaseEvent(ev *Event) {
|
||||
if ev.clEvent != nil {
|
||||
C.clReleaseEvent(ev.clEvent)
|
||||
ev.clEvent = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Event) Release() {
|
||||
releaseEvent(e)
|
||||
}
|
||||
|
||||
func (e *Event) GetEventProfilingInfo(paramName ProfilingInfo) (int64, error) {
|
||||
var paramValue C.cl_ulong
|
||||
if err := C.clGetEventProfilingInfo(e.clEvent, C.cl_profiling_info(paramName), C.size_t(unsafe.Sizeof(paramValue)), unsafe.Pointer(¶mValue), nil); err != C.CL_SUCCESS {
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(paramValue), nil
|
||||
}
|
||||
|
||||
// Sets the execution status of a user event object.
|
||||
//
|
||||
// `status` specifies the new execution status to be set and
|
||||
// can be CL_COMPLETE or a negative integer value to indicate
|
||||
// an error. A negative integer value causes all enqueued commands
|
||||
// that wait on this user event to be terminated. clSetUserEventStatus
|
||||
// can only be called once to change the execution status of event.
|
||||
func (e *Event) SetUserEventStatus(status int) error {
|
||||
return toError(C.clSetUserEventStatus(e.clEvent, C.cl_int(status)))
|
||||
}
|
||||
|
||||
// Waits on the host thread for commands identified by event objects in
|
||||
// events to complete. A command is considered complete if its execution
|
||||
// status is CL_COMPLETE or a negative value. The events specified in
|
||||
// event_list act as synchronization points.
|
||||
//
|
||||
// If the cl_khr_gl_event extension is enabled, event objects can also be
|
||||
// used to reflect the status of an OpenGL sync object. The sync object
|
||||
// in turn refers to a fence command executing in an OpenGL command
|
||||
// stream. This provides another method of coordinating sharing of buffers
|
||||
// and images between OpenGL and OpenCL.
|
||||
func WaitForEvents(events []*Event) error {
|
||||
return toError(C.clWaitForEvents(C.cl_uint(len(events)), eventListPtr(events)))
|
||||
}
|
||||
|
||||
func newEvent(clEvent C.cl_event) *Event {
|
||||
ev := &Event{clEvent: clEvent}
|
||||
runtime.SetFinalizer(ev, releaseEvent)
|
||||
return ev
|
||||
}
|
||||
|
||||
func eventListPtr(el []*Event) *C.cl_event {
|
||||
if el == nil {
|
||||
return nil
|
||||
}
|
||||
elist := make([]C.cl_event, len(el))
|
||||
for i, e := range el {
|
||||
elist[i] = e.clEvent
|
||||
}
|
||||
return (*C.cl_event)(&elist[0])
|
||||
}
|
||||
|
||||
func clBool(b bool) C.cl_bool {
|
||||
if b {
|
||||
return C.CL_TRUE
|
||||
}
|
||||
return C.CL_FALSE
|
||||
}
|
||||
|
||||
func sizeT3(i3 [3]int) [3]C.size_t {
|
||||
var val [3]C.size_t
|
||||
val[0] = C.size_t(i3[0])
|
||||
val[1] = C.size_t(i3[1])
|
||||
val[2] = C.size_t(i3[2])
|
||||
return val
|
||||
}
|
||||
|
||||
type MappedMemObject struct {
|
||||
ptr unsafe.Pointer
|
||||
size int
|
||||
rowPitch int
|
||||
slicePitch int
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) ByteSlice() []byte {
|
||||
var byteSlice []byte
|
||||
sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&byteSlice))
|
||||
sliceHeader.Cap = mb.size
|
||||
sliceHeader.Len = mb.size
|
||||
sliceHeader.Data = uintptr(mb.ptr)
|
||||
return byteSlice
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Ptr() unsafe.Pointer {
|
||||
return mb.ptr
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Size() int {
|
||||
return mb.size
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) RowPitch() int {
|
||||
return mb.rowPitch
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) SlicePitch() int {
|
||||
return mb.slicePitch
|
||||
}
|
|
@ -1,71 +0,0 @@
|
|||
// +build cl12
|
||||
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
const (
|
||||
ChannelDataTypeUNormInt24 ChannelDataType = C.CL_UNORM_INT24
|
||||
ChannelOrderDepth ChannelOrder = C.CL_DEPTH
|
||||
ChannelOrderDepthStencil ChannelOrder = C.CL_DEPTH_STENCIL
|
||||
MemHostNoAccess MemFlag = C.CL_MEM_HOST_NO_ACCESS // OpenCL 1.2
|
||||
MemHostReadOnly MemFlag = C.CL_MEM_HOST_READ_ONLY // OpenCL 1.2
|
||||
MemHostWriteOnly MemFlag = C.CL_MEM_HOST_WRITE_ONLY // OpenCL 1.2
|
||||
MemObjectTypeImage1D MemObjectType = C.CL_MEM_OBJECT_IMAGE1D
|
||||
MemObjectTypeImage1DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_ARRAY
|
||||
MemObjectTypeImage1DBuffer MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_BUFFER
|
||||
MemObjectTypeImage2DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE2D_ARRAY
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for writing.
|
||||
//
|
||||
// The contents of the region being mapped are to be discarded. This is typically the case when the
|
||||
// region being mapped is overwritten by the host. This flag allows the implementation to no longer
|
||||
// guarantee that the pointer returned by clEnqueueMapBuffer or clEnqueueMapImage contains the
|
||||
// latest bits in the region being mapped which can be a significant performance enhancement.
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func init() {
|
||||
errorMap[C.CL_COMPILE_PROGRAM_FAILURE] = ErrCompileProgramFailure
|
||||
errorMap[C.CL_DEVICE_PARTITION_FAILED] = ErrDevicePartitionFailed
|
||||
errorMap[C.CL_INVALID_COMPILER_OPTIONS] = ErrInvalidCompilerOptions
|
||||
errorMap[C.CL_INVALID_DEVICE_PARTITION_COUNT] = ErrInvalidDevicePartitionCount
|
||||
errorMap[C.CL_INVALID_IMAGE_DESCRIPTOR] = ErrInvalidImageDescriptor
|
||||
errorMap[C.CL_INVALID_LINKER_OPTIONS] = ErrInvalidLinkerOptions
|
||||
errorMap[C.CL_KERNEL_ARG_INFO_NOT_AVAILABLE] = ErrKernelArgInfoNotAvailable
|
||||
errorMap[C.CL_LINK_PROGRAM_FAILURE] = ErrLinkProgramFailure
|
||||
errorMap[C.CL_LINKER_NOT_AVAILABLE] = ErrLinkerNotAvailable
|
||||
channelOrderNameMap[ChannelOrderDepth] = "Depth"
|
||||
channelOrderNameMap[ChannelOrderDepthStencil] = "DepthStencil"
|
||||
channelDataTypeNameMap[ChannelDataTypeUNormInt24] = "UNormInt24"
|
||||
}
|
||||
|
||||
type ImageDescription struct {
|
||||
Type MemObjectType
|
||||
Width, Height, Depth int
|
||||
ArraySize, RowPitch, SlicePitch int
|
||||
NumMipLevels, NumSamples int
|
||||
Buffer *MemObject
|
||||
}
|
||||
|
||||
func (d ImageDescription) toCl() C.cl_image_desc {
|
||||
var desc C.cl_image_desc
|
||||
desc.image_type = C.cl_mem_object_type(d.Type)
|
||||
desc.image_width = C.size_t(d.Width)
|
||||
desc.image_height = C.size_t(d.Height)
|
||||
desc.image_depth = C.size_t(d.Depth)
|
||||
desc.image_array_size = C.size_t(d.ArraySize)
|
||||
desc.image_row_pitch = C.size_t(d.RowPitch)
|
||||
desc.image_slice_pitch = C.size_t(d.SlicePitch)
|
||||
desc.num_mip_levels = C.cl_uint(d.NumMipLevels)
|
||||
desc.num_samples = C.cl_uint(d.NumSamples)
|
||||
desc.buffer = nil
|
||||
if d.Buffer != nil {
|
||||
desc.buffer = d.Buffer.clMem
|
||||
}
|
||||
return desc
|
||||
}
|
45
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types_darwin.go
generated
vendored
|
@ -1,45 +0,0 @@
|
|||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
// Extension: cl_APPLE_fixed_alpha_channel_orders
|
||||
//
|
||||
// These selectors may be passed to clCreateImage2D() in the cl_image_format.image_channel_order field.
|
||||
// They are like CL_BGRA and CL_ARGB except that the alpha channel is ignored. On calls to read_imagef,
|
||||
// the alpha will be 0xff (1.0f) if the sample falls in the image and 0 if it does not fall in the image.
|
||||
// On calls to write_imagef, the alpha value is ignored and 0xff (1.0f) is written. These formats are
|
||||
// currently only available for the CL_UNORM_INT8 cl_channel_type. They are intended to support legacy
|
||||
// image formats.
|
||||
const (
|
||||
ChannelOrder1RGBApple ChannelOrder = C.CL_1RGB_APPLE // Introduced in MacOS X.7.
|
||||
ChannelOrderBGR1Apple ChannelOrder = C.CL_BGR1_APPLE // Introduced in MacOS X.7.
|
||||
)
|
||||
|
||||
// Extension: cl_APPLE_biased_fixed_point_image_formats
|
||||
//
|
||||
// This selector may be passed to clCreateImage2D() in the cl_image_format.image_channel_data_type field.
|
||||
// It defines a biased signed 1.14 fixed point storage format, with range [-1, 3). The conversion from
|
||||
// float to this fixed point format is defined as follows:
|
||||
//
|
||||
// ushort float_to_sfixed14( float x ){
|
||||
// int i = convert_int_sat_rte( x * 0x1.0p14f ); // scale [-1, 3.0) to [-16384, 3*16384), round to nearest integer
|
||||
// i = add_sat( i, 0x4000 ); // apply bias, to convert to [0, 65535) range
|
||||
// return convert_ushort_sat(i); // clamp to destination size
|
||||
// }
|
||||
//
|
||||
// The inverse conversion is the reverse process. The formats are currently only available on the CPU with
|
||||
// the CL_RGBA channel layout.
|
||||
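// Editor's note (illustrative worked example, not part of the original file):
// plugging x = 1.0 into float_to_sfixed14 above gives i = 1.0 * 2^14 = 16384,
// then i + 0x4000 = 32768, so the stored ushort is 32768; the bias maps the
// float range [-1, 3) onto the unsigned 16-bit storage range.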
const (
|
||||
ChannelDataTypeSFixed14Apple ChannelDataType = C.CL_SFIXED14_APPLE // Introduced in MacOS X.7.
|
||||
)
|
||||
|
||||
func init() {
|
||||
channelOrderNameMap[ChannelOrder1RGBApple] = "1RGBApple"
|
||||
channelOrderNameMap[ChannelOrderBGR1Apple] = "BGR1Apple"
|
||||
channelDataTypeNameMap[ChannelDataTypeSFixed14Apple] = "SFixed14Apple"
|
||||
}
|
|
@ -1,628 +0,0 @@
|
|||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build opencl
|
||||
|
||||
package ethash
|
||||
|
||||
//#cgo LDFLAGS: -w
|
||||
//#include <stdint.h>
|
||||
//#include <string.h>
|
||||
//#include "src/libethash/internal.h"
|
||||
import "C"
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
mrand "math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Gustav-Simonsson/go-opencl/cl"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
This code has two main entry points:
|
||||
|
||||
1. The initCL(...) function configures one or more OpenCL devices
|
||||
(for now only GPU) and loads the Ethash DAG onto device memory
|
||||
|
||||
2. The Search(...) function loads an Ethash nonce into device(s) memory and
|
||||
executes the Ethash OpenCL kernel.
|
||||
|
||||
Throughout the code, we refer to "host memory" and "device memory".
|
||||
For most systems (e.g. regular PC GPU miner) the host memory is RAM and
|
||||
device memory is the GPU global memory (e.g. GDDR5).
|
||||
|
||||
References mentioned in code comments:
|
||||
|
||||
1. https://github.com/ethereum/wiki/wiki/Ethash
|
||||
2. https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner.cpp
|
||||
3. https://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/
|
||||
4. http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_OpenCL_Programming_User_Guide.pdf
|
||||
|
||||
*/
|
||||
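// Editor's note: a minimal usage sketch of the two entry points described above,
// not part of the original file. The helper name, the block value (a pow.Block
// supplied by the caller) and the choice of device id 0 are assumptions for
// illustration only.
func exampleOpenCLMinerUsage(block pow.Block) (uint64, []byte) {
	miner := NewCL([]int{0}) // mine on OpenCL device id 0
	if err := InitCL(block.NumberU64(), miner); err != nil {
		return 0, nil // device setup or DAG upload failed
	}
	stop := make(chan struct{})
	return miner.Search(block, stop, 0) // search on the first configured device
}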
|
||||
type OpenCLDevice struct {
|
||||
deviceId int
|
||||
device *cl.Device
|
||||
openCL11 bool // OpenCL versions 1.1 and 1.2 are handled a bit differently
|
||||
openCL12 bool
|
||||
|
||||
dagBuf *cl.MemObject // Ethash full DAG in device mem
|
||||
headerBuf *cl.MemObject // Hash of block-to-mine in device mem
|
||||
searchBuffers []*cl.MemObject
|
||||
|
||||
searchKernel *cl.Kernel
|
||||
hashKernel *cl.Kernel
|
||||
|
||||
queue *cl.CommandQueue
|
||||
ctx *cl.Context
|
||||
workGroupSize int
|
||||
|
||||
nonceRand *mrand.Rand // seeded by crypto/rand, see comments where it's initialised
|
||||
result common.Hash
|
||||
}
|
||||
|
||||
type OpenCLMiner struct {
|
||||
mu sync.Mutex
|
||||
|
||||
ethash *Ethash // Ethash full DAG & cache in host mem
|
||||
|
||||
deviceIds []int
|
||||
devices []*OpenCLDevice
|
||||
|
||||
dagSize uint64
|
||||
|
||||
hashRate int32 // Go atomics & uint64 have some issues; int32 is supported on all platforms
|
||||
}
|
||||
|
||||
type pendingSearch struct {
|
||||
bufIndex uint32
|
||||
startNonce uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SIZEOF_UINT32 = 4
|
||||
|
||||
// See [1]
|
||||
ethashMixBytesLen = 128
|
||||
ethashAccesses = 64
|
||||
|
||||
// See [4]
|
||||
workGroupSize = 32 // must be multiple of 8
|
||||
maxSearchResults = 63
|
||||
searchBufSize = 2
|
||||
globalWorkSize = 1024 * 256
|
||||
)
|
||||
|
||||
func NewCL(deviceIds []int) *OpenCLMiner {
|
||||
ids := make([]int, len(deviceIds))
|
||||
copy(ids, deviceIds)
|
||||
return &OpenCLMiner{
|
||||
ethash: New(),
|
||||
dagSize: 0, // to see if we need to update DAG.
|
||||
deviceIds: ids,
|
||||
}
|
||||
}
|
||||
|
||||
func PrintDevices() {
|
||||
fmt.Println("=============================================")
|
||||
fmt.Println("============ OpenCL Device Info =============")
|
||||
fmt.Println("=============================================")
|
||||
|
||||
var found []*cl.Device
|
||||
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
fmt.Println("Plaform error (check your OpenCL installation):", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i, p := range platforms {
|
||||
fmt.Println("Platform id ", i)
|
||||
fmt.Println("Platform Name ", p.Name())
|
||||
fmt.Println("Platform Vendor ", p.Vendor())
|
||||
fmt.Println("Platform Version ", p.Version())
|
||||
fmt.Println("Platform Extensions ", p.Extensions())
|
||||
fmt.Println("Platform Profile ", p.Profile())
|
||||
fmt.Println("")
|
||||
|
||||
devices, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
fmt.Println("Device error (check your GPU drivers) :", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, d := range devices {
|
||||
fmt.Println("Device OpenCL id ", i)
|
||||
fmt.Println("Device id for mining ", len(found))
|
||||
fmt.Println("Device Name ", d.Name())
|
||||
fmt.Println("Vendor ", d.Vendor())
|
||||
fmt.Println("Version ", d.Version())
|
||||
fmt.Println("Driver version ", d.DriverVersion())
|
||||
fmt.Println("Address bits ", d.AddressBits())
|
||||
fmt.Println("Max clock freq ", d.MaxClockFrequency())
|
||||
fmt.Println("Global mem size ", d.GlobalMemSize())
|
||||
fmt.Println("Max constant buffer size", d.MaxConstantBufferSize())
|
||||
fmt.Println("Max mem alloc size ", d.MaxMemAllocSize())
|
||||
fmt.Println("Max compute units ", d.MaxComputeUnits())
|
||||
fmt.Println("Max work group size ", d.MaxWorkGroupSize())
|
||||
fmt.Println("Max work item sizes ", d.MaxWorkItemSizes())
|
||||
fmt.Println("=============================================")
|
||||
|
||||
found = append(found, d)
|
||||
}
|
||||
}
|
||||
if len(found) == 0 {
|
||||
fmt.Println("Found no GPU(s). Check that your OS can see the GPU(s)")
|
||||
} else {
|
||||
var idsFormat string
|
||||
for i := 0; i < len(found); i++ {
|
||||
idsFormat += strconv.Itoa(i)
|
||||
if i != len(found)-1 {
|
||||
idsFormat += ","
|
||||
}
|
||||
}
|
||||
fmt.Printf("Found %v devices. Benchmark first GPU: geth gpubench 0\n", len(found))
|
||||
fmt.Printf("Mine using all GPUs: geth --minegpu %v\n", idsFormat)
|
||||
}
|
||||
}
|
||||
|
||||
// See [2]. We basically do the same here, but the Go OpenCL bindings
|
||||
// are at a slightly higher abstraction level.
|
||||
func InitCL(blockNum uint64, c *OpenCLMiner) error {
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Plaform error: %v\nCheck your OpenCL installation and then run geth gpuinfo", err)
|
||||
}
|
||||
|
||||
var devices []*cl.Device
|
||||
for _, p := range platforms {
|
||||
ds, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run geth gpuinfo", err)
|
||||
}
|
||||
for _, d := range ds {
|
||||
devices = append(devices, d)
|
||||
}
|
||||
}
|
||||
|
||||
pow := New()
|
||||
_ = pow.getDAG(blockNum) // generates DAG if we don't have it
|
||||
pow.Light.getCache(blockNum) // and cache
|
||||
|
||||
c.ethash = pow
|
||||
dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum)))
|
||||
c.dagSize = dagSize
|
||||
|
||||
for _, id := range c.deviceIds {
|
||||
if id > len(devices)-1 {
|
||||
return fmt.Errorf("Device id not found. See available device ids with: geth gpuinfo")
|
||||
} else {
|
||||
err := initCLDevice(id, devices[id], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(c.devices) == 0 {
|
||||
return fmt.Errorf("No GPU devices found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
|
||||
devMaxAlloc := uint64(device.MaxMemAllocSize())
|
||||
devGlobalMem := uint64(device.GlobalMemSize())
|
||||
|
||||
// TODO: more fine grained version logic
|
||||
if device.Version() == "OpenCL 1.0" {
|
||||
fmt.Println("Device OpenCL version not supported: ", device.Version())
|
||||
return fmt.Errorf("opencl version not supported")
|
||||
}
|
||||
|
||||
var cl11, cl12 bool
|
||||
if device.Version() == "OpenCL 1.1" {
|
||||
cl11 = true
|
||||
}
|
||||
if device.Version() == "OpenCL 1.2" {
|
||||
cl12 = true
|
||||
}
|
||||
|
||||
// log warnings but carry on; some device drivers report inaccurate values
|
||||
if c.dagSize > devGlobalMem {
|
||||
fmt.Printf("WARNING: device memory may be insufficient: %v. DAG size: %v.\n", devGlobalMem, c.dagSize)
|
||||
}
|
||||
|
||||
if c.dagSize > devMaxAlloc {
|
||||
fmt.Printf("WARNING: DAG size (%v) larger than device max memory allocation size (%v).\n", c.dagSize, devMaxAlloc)
|
||||
fmt.Printf("You probably have to export GPU_MAX_ALLOC_PERCENT=95\n")
|
||||
}
|
||||
|
||||
fmt.Printf("Initialising device %v: %v\n", deviceId, device.Name())
|
||||
|
||||
context, err := cl.CreateContext([]*cl.Device{device})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating context: %v", err)
|
||||
}
|
||||
|
||||
// TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling?
|
||||
queue, err := context.CreateCommandQueue(device, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("command queue err: %v", err)
|
||||
}
|
||||
|
||||
// See [4] section 3.2 and [3] "clBuildProgram".
|
||||
// The OpenCL kernel code is compiled at run-time.
|
||||
kvs := make(map[string]string, 4)
|
||||
kvs["GROUP_SIZE"] = strconv.FormatUint(workGroupSize, 10)
|
||||
kvs["DAG_SIZE"] = strconv.FormatUint(c.dagSize/ethashMixBytesLen, 10)
|
||||
kvs["ACCESSES"] = strconv.FormatUint(ethashAccesses, 10)
|
||||
kvs["MAX_OUTPUTS"] = strconv.FormatUint(maxSearchResults, 10)
|
||||
kernelCode := replaceWords(kernel, kvs)
|
||||
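// Editor's note (not part of the original file): replaceWords performs plain text
// substitution, so the kernel source compiles with GROUP_SIZE, DAG_SIZE,
// ACCESSES and MAX_OUTPUTS baked in as literal constants for this device.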
|
||||
program, err := context.CreateProgramWithSource([]string{kernelCode})
|
||||
if err != nil {
|
||||
return fmt.Errorf("program err: %v", err)
|
||||
}
|
||||
|
||||
/* if using AMD OpenCL impl, you can set this to debug on x86 CPU device.
|
||||
see AMD OpenCL programming guide section 4.2
|
||||
|
||||
export in shell before running:
|
||||
export AMD_OCL_BUILD_OPTIONS_APPEND="-g -O0"
|
||||
export CPU_MAX_COMPUTE_UNITS=1
|
||||
|
||||
buildOpts := "-g -cl-opt-disable"
|
||||
|
||||
*/
|
||||
buildOpts := ""
|
||||
err = program.BuildProgram([]*cl.Device{device}, buildOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("program build err: %v", err)
|
||||
}
|
||||
|
||||
var searchKernelName, hashKernelName string
|
||||
searchKernelName = "ethash_search"
|
||||
hashKernelName = "ethash_hash"
|
||||
|
||||
searchKernel, err := program.CreateKernel(searchKernelName)
|
||||
hashKernel, err := program.CreateKernel(hashKernelName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kernel err: %v", err)
|
||||
}
|
||||
|
||||
// TODO: when this DAG size appears, patch the Go bindings
|
||||
// (context.go) to work with uint64 as size_t
|
||||
if c.dagSize > math.MaxInt32 {
|
||||
fmt.Println("DAG too large for allocation.")
|
||||
return fmt.Errorf("DAG too large for alloc")
|
||||
}
|
||||
|
||||
// TODO: patch up Go bindings to work with size_t, will overflow if > maxint32
|
||||
// TODO: this will overflow around 2017-06-09 12:17:02
|
||||
var dagBuf *cl.MemObject
|
||||
dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("allocating dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
// write DAG to device mem
|
||||
dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data)
|
||||
_, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing to dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
searchBuffers := make([]*cl.MemObject, searchBufSize)
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("search buffer err: %v", err)
|
||||
}
|
||||
searchBuffers[i] = searchBuff
|
||||
}
|
||||
|
||||
headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("header buffer err: %v", err)
|
||||
}
|
||||
|
||||
// Unique, random nonces are crucial for mining efficiency.
|
||||
// While we do not need cryptographically secure PRNG for nonces,
|
||||
// we want to have uniform distribution and minimal repetition of nonces.
|
||||
// We could guarantee strict uniqueness of nonces by generating unique ranges,
|
||||
// but an int64 seed from crypto/rand should be good enough.
|
||||
// we then use math/rand for speed and to avoid draining the OS entropy pool
|
||||
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nonceRand := mrand.New(mrand.NewSource(seed.Int64()))
|
||||
|
||||
deviceStruct := &OpenCLDevice{
|
||||
deviceId: deviceId,
|
||||
device: device,
|
||||
openCL11: cl11,
|
||||
openCL12: cl12,
|
||||
|
||||
dagBuf: dagBuf,
|
||||
headerBuf: headerBuf,
|
||||
searchBuffers: searchBuffers,
|
||||
|
||||
searchKernel: searchKernel,
|
||||
hashKernel: hashKernel,
|
||||
|
||||
queue: queue,
|
||||
ctx: context,
|
||||
|
||||
workGroupSize: workGroupSize,
|
||||
|
||||
nonceRand: nonceRand,
|
||||
}
|
||||
c.devices = append(c.devices, deviceStruct)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
|
||||
c.mu.Lock()
|
||||
newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
|
||||
if newDagSize > c.dagSize {
|
||||
// TODO: clean up buffers from previous DAG?
|
||||
err := InitCL(block.NumberU64(), c)
|
||||
if err != nil {
|
||||
fmt.Println("OpenCL init error: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
|
||||
select {
|
||||
case <-stop:
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
headerHash := block.HashNoNonce()
|
||||
diff := block.Difficulty()
|
||||
target256 := new(big.Int).Div(maxUint256, diff)
|
||||
target64 := new(big.Int).Rsh(target256, 192).Uint64()
|
||||
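// Editor's note (not part of the original file): shifting the 256-bit target
// right by 192 bits keeps only its top 64 bits, so the kernel can pre-filter
// candidates with a single 64-bit compare; the full 256-bit check against
// target256 is still done on the CPU further below.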
var zero uint32 = 0
|
||||
|
||||
d := c.devices[index]
|
||||
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all search buffers to complete
|
||||
err = d.queue.Finish()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clFinish : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(1, d.headerBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(2, d.dagBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(4, target64)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// wait on this before returning
|
||||
var preReturnEvent *cl.Event
|
||||
if d.openCL12 {
|
||||
preReturnEvent, err = d.ctx.CreateUserEvent()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search create CL user event : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
pending := make([]pendingSearch, 0, searchBufSize)
|
||||
var p *pendingSearch
|
||||
searchBufIndex := uint32(0)
|
||||
var checkNonce uint64
|
||||
loops := int64(0)
|
||||
prevHashRate := int32(0)
|
||||
start := time.Now().UnixNano()
|
||||
// we grab a single random nonce and set it as an argument to the kernel search function
|
||||
// the device will then add each local thread's gid to the nonce, creating a unique nonce
|
||||
// for each device compute unit executing in parallel
|
||||
initNonce := uint64(d.nonceRand.Int63())
|
||||
for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
|
||||
select {
|
||||
case <-stop:
|
||||
|
||||
/*
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
atomic.AddInt32(&c.hashRate, -prevHashRate)
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
if (loops % (1 << 7)) == 0 {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
// TODO: verify if this is correct hash rate calculation
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256)
|
||||
hashrateDiff := int32(hashes) - prevHashRate
|
||||
prevHashRate = int32(hashes)
|
||||
atomic.AddInt32(&c.hashRate, hashrateDiff)
|
||||
}
|
||||
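// Editor's note (illustrative arithmetic, not part of the original file): with
// globalWorkSize = 1024*256 nonces per kernel launch, the estimate above is total
// hashes divided by elapsed seconds; e.g. loops = 128 launches in elapsed = 2e9 ns
// gives (1e9 / 2e9) * 128 * 262144 = 16777216, i.e. roughly 16.8 MH/s.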
loops++
|
||||
|
||||
err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex])
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(3, nonce)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// execute kernel
|
||||
_, err := d.queue.EnqueueNDRangeKernel(
|
||||
d.searchKernel,
|
||||
[]int{0},
|
||||
[]int{globalWorkSize},
|
||||
[]int{d.workGroupSize},
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce})
|
||||
searchBufIndex = (searchBufIndex + 1) % searchBufSize
|
||||
|
||||
if len(pending) == searchBufSize {
|
||||
p = &(pending[searchBufIndex])
|
||||
cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true,
|
||||
cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32,
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueMapBuffer: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
results := cres.ByteSlice()
|
||||
nfound := binary.LittleEndian.Uint32(results)
|
||||
nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults)))
|
||||
// OpenCL returns the offsets from the start nonce
|
||||
for i := uint32(0); i < nfound; i++ {
|
||||
lo := (i + 1) * SIZEOF_UINT32
|
||||
hi := (i + 2) * SIZEOF_UINT32
|
||||
upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
|
||||
checkNonce = p.startNonce + upperNonce
|
||||
if checkNonce != 0 {
|
||||
// We verify that the nonce is indeed a solution by
|
||||
// executing the Ethash verification function (on the CPU).
|
||||
cache := c.ethash.Light.getCache(block.NumberU64())
|
||||
ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)
|
||||
|
||||
// TODO: return result first
|
||||
if ok && result.Big().Cmp(target256) <= 0 {
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
|
||||
}
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
return checkNonce, mixDigest.Bytes()
|
||||
}
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...)
|
||||
}
|
||||
}
|
||||
if d.openCL12 {
|
||||
err := cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clWaitForEvents: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Verify(block pow.Block) bool {
|
||||
return c.ethash.Light.Verify(block)
|
||||
}
|
||||
func (c *OpenCLMiner) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&c.hashRate))
|
||||
}
|
||||
func (c *OpenCLMiner) Turbo(on bool) {
|
||||
// This is GPU mining. Always be turbo.
|
||||
}
|
||||
|
||||
func replaceWords(text string, kvs map[string]string) string {
|
||||
for k, v := range kvs {
|
||||
text = strings.Replace(text, k, v, -1)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func logErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println("Error in OpenCL call:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func argErr(err error) error {
|
||||
return fmt.Errorf("arg err: %v", err)
|
||||
}
|
600
Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl_kernel_go_str.go
generated
vendored
|
@ -1,600 +0,0 @@
|
|||
package ethash
|
||||
|
||||
/* DO NOT EDIT!!!
|
||||
|
||||
This code is version controlled at
|
||||
https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner_kernel.cl
|
||||
|
||||
If needed change it there first, then copy over here.
|
||||
*/
|
||||
|
||||
const kernel = `
|
||||
// author Tim Hughes <tim@twistedfury.com>
|
||||
// Tested on Radeon HD 7850
|
||||
// Hashrate: 15940347 hashes/s
|
||||
// Bandwidth: 124533 MB/s
|
||||
// search kernel should fit in <= 84 VGPRS (3 wavefronts)
|
||||
|
||||
#define THREADS_PER_HASH (128 / 16)
|
||||
#define HASHES_PER_LOOP (GROUP_SIZE / THREADS_PER_HASH)
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
__constant uint2 const Keccak_f1600_RC[24] = {
|
||||
(uint2)(0x00000001, 0x00000000),
|
||||
(uint2)(0x00008082, 0x00000000),
|
||||
(uint2)(0x0000808a, 0x80000000),
|
||||
(uint2)(0x80008000, 0x80000000),
|
||||
(uint2)(0x0000808b, 0x00000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008009, 0x80000000),
|
||||
(uint2)(0x0000008a, 0x00000000),
|
||||
(uint2)(0x00000088, 0x00000000),
|
||||
(uint2)(0x80008009, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x00000000),
|
||||
(uint2)(0x8000808b, 0x00000000),
|
||||
(uint2)(0x0000008b, 0x80000000),
|
||||
(uint2)(0x00008089, 0x80000000),
|
||||
(uint2)(0x00008003, 0x80000000),
|
||||
(uint2)(0x00008002, 0x80000000),
|
||||
(uint2)(0x00000080, 0x80000000),
|
||||
(uint2)(0x0000800a, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x80000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008080, 0x80000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008008, 0x80000000),
|
||||
};
|
||||
|
||||
void keccak_f1600_round(uint2* a, uint r, uint out_size)
|
||||
{
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
|
||||
uint2 b[25];
|
||||
uint2 t;
|
||||
|
||||
// Theta
|
||||
b[0] = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20];
|
||||
b[1] = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21];
|
||||
b[2] = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22];
|
||||
b[3] = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23];
|
||||
b[4] = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24];
|
||||
t = b[4] ^ (uint2)(b[1].x << 1 | b[1].y >> 31, b[1].y << 1 | b[1].x >> 31);
|
||||
a[0] ^= t;
|
||||
a[5] ^= t;
|
||||
a[10] ^= t;
|
||||
a[15] ^= t;
|
||||
a[20] ^= t;
|
||||
t = b[0] ^ (uint2)(b[2].x << 1 | b[2].y >> 31, b[2].y << 1 | b[2].x >> 31);
|
||||
a[1] ^= t;
|
||||
a[6] ^= t;
|
||||
a[11] ^= t;
|
||||
a[16] ^= t;
|
||||
a[21] ^= t;
|
||||
t = b[1] ^ (uint2)(b[3].x << 1 | b[3].y >> 31, b[3].y << 1 | b[3].x >> 31);
|
||||
a[2] ^= t;
|
||||
a[7] ^= t;
|
||||
a[12] ^= t;
|
||||
a[17] ^= t;
|
||||
a[22] ^= t;
|
||||
t = b[2] ^ (uint2)(b[4].x << 1 | b[4].y >> 31, b[4].y << 1 | b[4].x >> 31);
|
||||
a[3] ^= t;
|
||||
a[8] ^= t;
|
||||
a[13] ^= t;
|
||||
a[18] ^= t;
|
||||
a[23] ^= t;
|
||||
t = b[3] ^ (uint2)(b[0].x << 1 | b[0].y >> 31, b[0].y << 1 | b[0].x >> 31);
|
||||
a[4] ^= t;
|
||||
a[9] ^= t;
|
||||
a[14] ^= t;
|
||||
a[19] ^= t;
|
||||
a[24] ^= t;
|
||||
|
||||
// Rho Pi
|
||||
b[0] = a[0];
|
||||
b[10] = (uint2)(a[1].x << 1 | a[1].y >> 31, a[1].y << 1 | a[1].x >> 31);
|
||||
b[7] = (uint2)(a[10].x << 3 | a[10].y >> 29, a[10].y << 3 | a[10].x >> 29);
|
||||
b[11] = (uint2)(a[7].x << 6 | a[7].y >> 26, a[7].y << 6 | a[7].x >> 26);
|
||||
b[17] = (uint2)(a[11].x << 10 | a[11].y >> 22, a[11].y << 10 | a[11].x >> 22);
|
||||
b[18] = (uint2)(a[17].x << 15 | a[17].y >> 17, a[17].y << 15 | a[17].x >> 17);
|
||||
b[3] = (uint2)(a[18].x << 21 | a[18].y >> 11, a[18].y << 21 | a[18].x >> 11);
|
||||
b[5] = (uint2)(a[3].x << 28 | a[3].y >> 4, a[3].y << 28 | a[3].x >> 4);
|
||||
b[16] = (uint2)(a[5].y << 4 | a[5].x >> 28, a[5].x << 4 | a[5].y >> 28);
|
||||
b[8] = (uint2)(a[16].y << 13 | a[16].x >> 19, a[16].x << 13 | a[16].y >> 19);
|
||||
b[21] = (uint2)(a[8].y << 23 | a[8].x >> 9, a[8].x << 23 | a[8].y >> 9);
|
||||
b[24] = (uint2)(a[21].x << 2 | a[21].y >> 30, a[21].y << 2 | a[21].x >> 30);
|
||||
b[4] = (uint2)(a[24].x << 14 | a[24].y >> 18, a[24].y << 14 | a[24].x >> 18);
|
||||
b[15] = (uint2)(a[4].x << 27 | a[4].y >> 5, a[4].y << 27 | a[4].x >> 5);
|
||||
b[23] = (uint2)(a[15].y << 9 | a[15].x >> 23, a[15].x << 9 | a[15].y >> 23);
|
||||
b[19] = (uint2)(a[23].y << 24 | a[23].x >> 8, a[23].x << 24 | a[23].y >> 8);
|
||||
b[13] = (uint2)(a[19].x << 8 | a[19].y >> 24, a[19].y << 8 | a[19].x >> 24);
|
||||
b[12] = (uint2)(a[13].x << 25 | a[13].y >> 7, a[13].y << 25 | a[13].x >> 7);
|
||||
b[2] = (uint2)(a[12].y << 11 | a[12].x >> 21, a[12].x << 11 | a[12].y >> 21);
|
||||
b[20] = (uint2)(a[2].y << 30 | a[2].x >> 2, a[2].x << 30 | a[2].y >> 2);
|
||||
b[14] = (uint2)(a[20].x << 18 | a[20].y >> 14, a[20].y << 18 | a[20].x >> 14);
|
||||
b[22] = (uint2)(a[14].y << 7 | a[14].x >> 25, a[14].x << 7 | a[14].y >> 25);
|
||||
b[9] = (uint2)(a[22].y << 29 | a[22].x >> 3, a[22].x << 29 | a[22].y >> 3);
|
||||
b[6] = (uint2)(a[9].x << 20 | a[9].y >> 12, a[9].y << 20 | a[9].x >> 12);
|
||||
b[1] = (uint2)(a[6].y << 12 | a[6].x >> 20, a[6].x << 12 | a[6].y >> 20);
|
||||
|
||||
// Chi
|
||||
a[0] = bitselect(b[0] ^ b[2], b[0], b[1]);
|
||||
a[1] = bitselect(b[1] ^ b[3], b[1], b[2]);
|
||||
a[2] = bitselect(b[2] ^ b[4], b[2], b[3]);
|
||||
a[3] = bitselect(b[3] ^ b[0], b[3], b[4]);
|
||||
if (out_size >= 4)
|
||||
{
|
||||
a[4] = bitselect(b[4] ^ b[1], b[4], b[0]);
|
||||
a[5] = bitselect(b[5] ^ b[7], b[5], b[6]);
|
||||
a[6] = bitselect(b[6] ^ b[8], b[6], b[7]);
|
||||
a[7] = bitselect(b[7] ^ b[9], b[7], b[8]);
|
||||
a[8] = bitselect(b[8] ^ b[5], b[8], b[9]);
|
||||
if (out_size >= 8)
|
||||
{
|
||||
a[9] = bitselect(b[9] ^ b[6], b[9], b[5]);
|
||||
a[10] = bitselect(b[10] ^ b[12], b[10], b[11]);
|
||||
a[11] = bitselect(b[11] ^ b[13], b[11], b[12]);
|
||||
a[12] = bitselect(b[12] ^ b[14], b[12], b[13]);
|
||||
a[13] = bitselect(b[13] ^ b[10], b[13], b[14]);
|
||||
a[14] = bitselect(b[14] ^ b[11], b[14], b[10]);
|
||||
a[15] = bitselect(b[15] ^ b[17], b[15], b[16]);
|
||||
a[16] = bitselect(b[16] ^ b[18], b[16], b[17]);
|
||||
a[17] = bitselect(b[17] ^ b[19], b[17], b[18]);
|
||||
a[18] = bitselect(b[18] ^ b[15], b[18], b[19]);
|
||||
a[19] = bitselect(b[19] ^ b[16], b[19], b[15]);
|
||||
a[20] = bitselect(b[20] ^ b[22], b[20], b[21]);
|
||||
a[21] = bitselect(b[21] ^ b[23], b[21], b[22]);
|
||||
a[22] = bitselect(b[22] ^ b[24], b[22], b[23]);
|
||||
a[23] = bitselect(b[23] ^ b[20], b[23], b[24]);
|
||||
a[24] = bitselect(b[24] ^ b[21], b[24], b[20]);
|
||||
}
|
||||
}
|
||||
|
||||
// Iota
|
||||
a[0] ^= Keccak_f1600_RC[r];
|
||||
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
}
|
||||
|
||||
void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
|
||||
{
|
||||
for (uint i = in_size; i != 25; ++i)
|
||||
{
|
||||
a[i] = 0;
|
||||
}
|
||||
#if __ENDIAN_LITTLE__
|
||||
a[in_size] ^= 0x0000000000000001;
|
||||
a[24-out_size*2] ^= 0x8000000000000000;
|
||||
#else
|
||||
a[in_size] ^= 0x0100000000000000;
|
||||
a[24-out_size*2] ^= 0x0000000000000080;
|
||||
#endif
|
||||
|
||||
// Originally I unrolled the first and last rounds to interface
|
||||
// better with surrounding code, however I couldn't do this
|
||||
// without causing the AMD compiler to blow up the VGPR usage.
|
||||
uint r = 0;
|
||||
do
|
||||
{
|
||||
// This dynamic branch stops the AMD compiler unrolling the loop
|
||||
// and additionally saves about 33% of the VGPRs, enough to gain another
|
||||
// wavefront. Ideally we'd get 4 in flight, but 3 is the best I can
|
||||
// massage out of the compiler. It doesn't really seem to matter how
|
||||
// much we try and help the compiler save VGPRs because it seems to throw
|
||||
// that information away, hence the implementation of keccak here
|
||||
// doesn't bother.
|
||||
if (isolate)
|
||||
{
|
||||
keccak_f1600_round((uint2*)a, r++, 25);
|
||||
}
|
||||
}
|
||||
while (r < 23);
|
||||
|
||||
// final round optimised for digest size
|
||||
keccak_f1600_round((uint2*)a, r++, out_size);
|
||||
}
|
||||
|
||||
#define copy(dst, src, count) for (uint i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
|
||||
|
||||
#define countof(x) (sizeof(x) / sizeof(x[0]))
|
||||
|
||||
uint fnv(uint x, uint y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint4 fnv4(uint4 x, uint4 y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint fnv_reduce(uint4 v)
|
||||
{
|
||||
return fnv(fnv(fnv(v.x, v.y), v.z), v.w);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[32 / sizeof(ulong)];
|
||||
uint uints[32 / sizeof(uint)];
|
||||
} hash32_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[64 / sizeof(ulong)];
|
||||
uint4 uint4s[64 / sizeof(uint4)];
|
||||
} hash64_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
uint uints[128 / sizeof(uint)];
|
||||
uint4 uint4s[128 / sizeof(uint4)];
|
||||
} hash128_t;
|
||||
|
||||
hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
|
||||
{
|
||||
hash64_t init;
|
||||
uint const init_size = countof(init.ulongs);
|
||||
uint const hash_size = countof(header->ulongs);
|
||||
|
||||
// sha3_512(header .. nonce)
|
||||
ulong state[25];
|
||||
copy(state, header->ulongs, hash_size);
|
||||
state[hash_size] = nonce;
|
||||
keccak_f1600_no_absorb(state, hash_size + 1, init_size, isolate);
|
||||
|
||||
copy(init.ulongs, state, init_size);
|
||||
return init;
|
||||
}
|
||||
|
||||
uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, *share>=3 * DAG_SIZE / 4 ? g_dag3[*share - 3 * DAG_SIZE / 4].uint4s[thread_id] : *share>=DAG_SIZE / 2 ? g_dag2[*share - DAG_SIZE / 2].uint4s[thread_id] : *share>=DAG_SIZE / 4 ? g_dag1[*share - DAG_SIZE / 4].uint4s[thread_id]:g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
} while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
|
||||
uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
}
|
||||
while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
|
||||
{
|
||||
ulong state[25];
|
||||
|
||||
hash32_t hash;
|
||||
uint const hash_size = countof(hash.ulongs);
|
||||
uint const init_size = countof(init->ulongs);
|
||||
uint const mix_size = countof(mix->ulongs);
|
||||
|
||||
// keccak_256(keccak_512(header..nonce) .. mix);
|
||||
copy(state, init->ulongs, init_size);
|
||||
copy(state + init_size, mix->ulongs, mix_size);
|
||||
keccak_f1600_no_absorb(state, init_size+mix_size, hash_size, isolate);
|
||||
|
||||
// copy out
|
||||
copy(hash.ulongs, state, hash_size);
|
||||
return hash;
|
||||
}
|
||||
|
||||
hash32_t compute_hash_simple(
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
hash128_t mix;
|
||||
for (uint i = 0; i != countof(mix.uint4s); ++i)
|
||||
{
|
||||
mix.uint4s[i] = init.uint4s[i % countof(init.uint4s)];
|
||||
}
|
||||
|
||||
uint mix_val = mix.uints[0];
|
||||
uint init0 = mix.uints[0];
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
uint pi = fnv(init0 ^ a, mix_val) % DAG_SIZE;
|
||||
uint n = (a+1) % countof(mix.uints);
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != countof(mix.uints); ++i)
|
||||
{
|
||||
mix.uints[i] = fnv(mix.uints[i], g_dag[pi].uints[i]);
|
||||
mix_val = i == n ? mix.uints[i] : mix_val;
|
||||
}
|
||||
}
|
||||
while (++a != (ACCESSES & isolate));
|
||||
|
||||
// reduce to output
|
||||
hash32_t fnv_mix;
|
||||
for (uint i = 0; i != countof(fnv_mix.uints); ++i)
|
||||
{
|
||||
fnv_mix.uints[i] = fnv_reduce(mix.uint4s[i]);
|
||||
}
|
||||
|
||||
return final_hash(&init, &fnv_mix, isolate);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
struct
|
||||
{
|
||||
hash64_t init;
|
||||
uint pad; // avoid lds bank conflicts
|
||||
};
|
||||
hash32_t mix;
|
||||
} compute_hash_share;
|
||||
|
||||
|
||||
hash32_t compute_hash(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop(thread_init, thread_id, share[hash_id].mix.uints, g_dag, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
|
||||
hash32_t compute_hash_chunks(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop_chunks(thread_init, thread_id, share[hash_id].mix.uints, g_dag, g_dag1, g_dag2, g_dag3, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_simple(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_simple(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min((uint)MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_chunks(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3,start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_chunks(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
`
|
|
@ -1,3 +0,0 @@
|
|||
language: go
|
||||
go: 1.3
|
||||
|
|
@ -1,117 +0,0 @@
|
|||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Port string
|
||||
Addr string
|
||||
Path string
|
||||
Msg chan string
|
||||
chs []chan string
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
Port string
|
||||
Addr string
|
||||
Path string
|
||||
ws *websocket.Conn
|
||||
}
|
||||
|
||||
var defaultPort = ":8080"
|
||||
|
||||
func NewServer() *Server {
|
||||
return &Server{
|
||||
Port: defaultPort,
|
||||
Addr: "localhost",
|
||||
Path: "/echo",
|
||||
Msg: make(chan string),
|
||||
chs: make([]chan string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func NewClient() Client {
|
||||
return Client{
|
||||
Port: defaultPort,
|
||||
Addr: "localhost",
|
||||
Path: "/echo",
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) ConnectAndListen() error {
|
||||
ws, err := websocket.Dial("ws://"+c.Addr+c.Port+c.Path, "", "http://"+c.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ws.Close()
|
||||
|
||||
var m string
|
||||
for {
|
||||
err := websocket.Message.Receive(ws, &m)
|
||||
if err != nil {
|
||||
fmt.Print(err)
|
||||
return err
|
||||
}
|
||||
fmt.Print(m)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) ListenAndServe() error {
|
||||
http.Handle(s.Path, websocket.Handler(func(ws *websocket.Conn) {
|
||||
defer ws.Close()
|
||||
|
||||
mc := make(chan string)
|
||||
s.chs = append(s.chs, mc)
|
||||
|
||||
for m := range mc {
|
||||
websocket.Message.Send(ws, m)
|
||||
}
|
||||
}))
|
||||
|
||||
go func() {
|
||||
for msg := range s.Msg {
|
||||
for _, c := range s.chs {
|
||||
go func(a chan string) {
|
||||
a <- msg
|
||||
}(c)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return http.ListenAndServe(s.Port, nil)
|
||||
}
|
||||
|
||||
func (s *Server) Log(msg string) {
|
||||
go func() { s.Msg <- msg }()
|
||||
}
|
||||
|
||||
func (s *Server) Logf(format string, a ...interface{}) {
|
||||
s.Log(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
var DefaultServer = NewServer()
|
||||
var DefaultClient = NewClient()
|
||||
|
||||
func ListenAndServe() error {
|
||||
return DefaultServer.ListenAndServe()
|
||||
}
|
||||
|
||||
func ConnectAndListen() error {
|
||||
return DefaultClient.ConnectAndListen()
|
||||
}
|
||||
|
||||
func Log(msg string) {
|
||||
DefaultServer.Log(msg)
|
||||
}
|
||||
|
||||
func Logf(format string, a ...interface{}) {
|
||||
DefaultServer.Logf(format, a...)
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/gizak/termui"
|
||||
"github.com/gizak/termui/debug"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// run as client
|
||||
if len(os.Args) > 1 {
|
||||
fmt.Print(debug.ConnectAndListen())
|
||||
return
|
||||
}
|
||||
|
||||
// run as server
|
||||
go func() { panic(debug.ListenAndServe()) }()
|
||||
|
||||
if err := termui.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer termui.Close()
|
||||
|
||||
//termui.UseTheme("helloworld")
|
||||
b := termui.NewBlock()
|
||||
b.Width = 20
|
||||
b.Height = 20
|
||||
b.Float = termui.AlignCenter
|
||||
b.BorderLabel = "[HELLO](fg-red,bg-white) [WORLD](fg-blue,bg-green)"
|
||||
|
||||
termui.Render(b)
|
||||
|
||||
termui.Handle("/sys", func(e termui.Event) {
|
||||
k, ok := e.Data.(termui.EvtKbd)
|
||||
debug.Logf("->%v\n", e)
|
||||
if ok && k.KeyStr == "q" {
|
||||
termui.StopLoop()
|
||||
}
|
||||
})
|
||||
|
||||
termui.Handle(("/usr"), func(e termui.Event) {
|
||||
debug.Logf("->%v\n", e)
|
||||
})
|
||||
|
||||
termui.Handle("/timer/1s", func(e termui.Event) {
|
||||
t := e.Data.(termui.EvtTimer)
|
||||
termui.SendCustomEvt("/usr/t", t.Count)
|
||||
|
||||
if t.Count%2 == 0 {
|
||||
b.BorderLabel = "[HELLO](fg-red,bg-green) [WORLD](fg-blue,bg-white)"
|
||||
} else {
|
||||
b.BorderLabel = "[HELLO](fg-blue,bg-white) [WORLD](fg-red,bg-green)"
|
||||
}
|
||||
|
||||
termui.Render(b)
|
||||
|
||||
})
|
||||
|
||||
termui.Loop()
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
The Snappy compression format in the Go programming language.
|
||||
|
||||
To download and install from source:
|
||||
$ go get github.com/golang/snappy
|
||||
|
||||
Unless otherwise noted, the Snappy-Go source files are distributed
|
||||
under the BSD-style license found in the LICENSE file.
|
|
@ -1,20 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/huin/goupnp/httpu"
|
||||
)
|
||||
|
||||
func main() {
|
||||
srv := httpu.Server{
|
||||
Addr: "239.255.255.250:1900",
|
||||
Multicast: true,
|
||||
Handler: httpu.HandlerFunc(func(r *http.Request) {
|
||||
log.Printf("Got %s %s message from %v: %v", r.Method, r.URL.Path, r.RemoteAddr, r.Header)
|
||||
}),
|
||||
}
|
||||
err := srv.ListenAndServe()
|
||||
log.Printf("Serving failed with error: %v", err)
|
||||
}
|
|
@ -1,67 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/huin/goupnp/dcps/internetgateway1"
|
||||
)
|
||||
|
||||
func main() {
|
||||
clients, errors, err := internetgateway1.NewWANPPPConnection1Clients()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Got %d errors finding servers and %d successfully discovered.\n",
|
||||
len(errors), len(clients))
|
||||
for i, e := range errors {
|
||||
fmt.Printf("Error finding server #%d: %v\n", i+1, e)
|
||||
}
|
||||
|
||||
for _, c := range clients {
|
||||
dev := &c.ServiceClient.RootDevice.Device
|
||||
srv := c.ServiceClient.Service
|
||||
fmt.Println(dev.FriendlyName, " :: ", srv.String())
|
||||
scpd, err := srv.RequestSCDP()
|
||||
if err != nil {
|
||||
fmt.Printf(" Error requesting service SCPD: %v\n", err)
|
||||
} else {
|
||||
fmt.Println(" Available actions:")
|
||||
for _, action := range scpd.Actions {
|
||||
fmt.Printf(" * %s\n", action.Name)
|
||||
for _, arg := range action.Arguments {
|
||||
var varDesc string
|
||||
if stateVar := scpd.GetStateVariable(arg.RelatedStateVariable); stateVar != nil {
|
||||
varDesc = fmt.Sprintf(" (%s)", stateVar.DataType.Name)
|
||||
}
|
||||
fmt.Printf(" * [%s] %s%s\n", arg.Direction, arg.Name, varDesc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if scpd == nil || scpd.GetAction("GetExternalIPAddress") != nil {
|
||||
ip, err := c.GetExternalIPAddress()
|
||||
fmt.Println("GetExternalIPAddress: ", ip, err)
|
||||
}
|
||||
|
||||
if scpd == nil || scpd.GetAction("GetStatusInfo") != nil {
|
||||
status, lastErr, uptime, err := c.GetStatusInfo()
|
||||
fmt.Println("GetStatusInfo: ", status, lastErr, uptime, err)
|
||||
}
|
||||
|
||||
if scpd == nil || scpd.GetAction("GetIdleDisconnectTime") != nil {
|
||||
idleTime, err := c.GetIdleDisconnectTime()
|
||||
fmt.Println("GetIdleDisconnectTime: ", idleTime, err)
|
||||
}
|
||||
|
||||
if scpd == nil || scpd.GetAction("AddPortMapping") != nil {
|
||||
err := c.AddPortMapping("", 5000, "TCP", 5001, "192.168.1.2", true, "Test port mapping", 0)
|
||||
fmt.Println("AddPortMapping: ", err)
|
||||
}
|
||||
if scpd == nil || scpd.GetAction("DeletePortMapping") != nil {
|
||||
err := c.DeletePortMapping("", 5000, "TCP")
|
||||
fmt.Println("DeletePortMapping: ", err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/huin/goupnp/ssdp"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := make(chan ssdp.Update)
|
||||
srv, reg := ssdp.NewServerAndRegistry()
|
||||
reg.AddListener(c)
|
||||
go listener(c)
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
log.Print("ListenAndServe failed: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func listener(c <-chan ssdp.Update) {
|
||||
for u := range c {
|
||||
if u.Entry != nil {
|
||||
log.Printf("Event: %v USN: %s Entry: %#v", u.EventType, u.USN, *u.Entry)
|
||||
} else {
|
||||
log.Printf("Event: %v USN: %s Entry: <nil>", u.EventType, u.USN)
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,6 +0,0 @@
|
|||
// Serves as examples of using the goupnp library.
|
||||
//
|
||||
// To run examples and see the output for your local network, run the following
|
||||
// command (specifically including the -v flag):
|
||||
// go test -v github.com/huin/goupnp/example
|
||||
package example
|
|
@ -1,603 +0,0 @@
|
|||
// +build gotask
|
||||
|
||||
package gotasks
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/huin/goupnp"
|
||||
"github.com/huin/goupnp/scpd"
|
||||
"github.com/huin/goutil/codegen"
|
||||
"github.com/jingweno/gotask/tasking"
|
||||
)
|
||||
|
||||
var (
|
||||
deviceURNPrefix = "urn:schemas-upnp-org:device:"
|
||||
serviceURNPrefix = "urn:schemas-upnp-org:service:"
|
||||
)
|
||||
|
||||
// DCP contains extra metadata to use when generating DCP source files.
|
||||
type DCPMetadata struct {
|
||||
Name string // What to name the Go DCP package.
|
||||
OfficialName string // Official name for the DCP.
|
||||
DocURL string // Optional - URL for further documentation about the DCP.
|
||||
XMLSpecURL string // Where to download the XML spec from.
|
||||
// Any special-case functions to run against the DCP before writing it out.
|
||||
Hacks []DCPHackFn
|
||||
}
|
||||
|
||||
var dcpMetadata = []DCPMetadata{
|
||||
{
|
||||
Name: "internetgateway1",
|
||||
OfficialName: "Internet Gateway Device v1",
|
||||
DocURL: "http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v1-Device.pdf",
|
||||
XMLSpecURL: "http://upnp.org/specs/gw/UPnP-gw-IGD-TestFiles-20010921.zip",
|
||||
},
|
||||
{
|
||||
Name: "internetgateway2",
|
||||
OfficialName: "Internet Gateway Device v2",
|
||||
DocURL: "http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v2-Device.pdf",
|
||||
XMLSpecURL: "http://upnp.org/specs/gw/UPnP-gw-IGD-Testfiles-20110224.zip",
|
||||
Hacks: []DCPHackFn{
|
||||
func(dcp *DCP) error {
|
||||
missingURN := "urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"
|
||||
if _, ok := dcp.ServiceTypes[missingURN]; ok {
|
||||
return nil
|
||||
}
|
||||
urnParts, err := extractURNParts(missingURN, serviceURNPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dcp.ServiceTypes[missingURN] = urnParts
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "av1",
|
||||
OfficialName: "MediaServer v1 and MediaRenderer v1",
|
||||
DocURL: "http://upnp.org/specs/av/av1/",
|
||||
XMLSpecURL: "http://upnp.org/specs/av/UPnP-av-TestFiles-20070927.zip",
|
||||
},
|
||||
}
|
||||
|
||||
type DCPHackFn func(*DCP) error
|
||||
|
||||
// NAME
|
||||
// specgen - generates Go code from the UPnP specification files.
|
||||
//
|
||||
// DESCRIPTION
|
||||
// The specification is available for download from:
|
||||
//
|
||||
// OPTIONS
|
||||
// -s, --specs_dir=<spec directory>
|
||||
// Path to the specification storage directory. This is used to find (and download if not present) the specification ZIP files. Defaults to 'specs'
|
||||
// -o, --out_dir=<output directory>
|
||||
// Path to the output directory. This is where the DCP source files will be placed. Should normally correspond to the directory for github.com/huin/goupnp/dcps. Defaults to '../dcps'
|
||||
// --nogofmt
|
||||
// Disable passing the output through gofmt. Useful when debugging code output problems, to see the generated code before it is run through gofmt.
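//
// EXAMPLE
//   A hypothetical invocation (assuming the gotask runner from
//   github.com/jingweno/gotask is installed and run from this directory):
//     gotask specgen --specs_dir=specs --out_dir=../dcps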
|
||||
func TaskSpecgen(t *tasking.T) {
|
||||
specsDir := fallbackStrValue("specs", t.Flags.String("specs_dir"), t.Flags.String("s"))
|
||||
if err := os.MkdirAll(specsDir, os.ModePerm); err != nil {
|
||||
t.Fatalf("Could not create specs-dir %q: %v\n", specsDir, err)
|
||||
}
|
||||
outDir := fallbackStrValue("../dcps", t.Flags.String("out_dir"), t.Flags.String("o"))
|
||||
useGofmt := !t.Flags.Bool("nogofmt")
|
||||
|
||||
NEXT_DCP:
|
||||
for _, d := range dcpMetadata {
|
||||
specFilename := filepath.Join(specsDir, d.Name+".zip")
|
||||
err := acquireFile(specFilename, d.XMLSpecURL)
|
||||
if err != nil {
|
||||
t.Logf("Could not acquire spec for %s, skipping: %v\n", d.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
dcp := newDCP(d)
|
||||
if err := dcp.processZipFile(specFilename); err != nil {
|
||||
log.Printf("Error processing spec for %s in file %q: %v", d.Name, specFilename, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
for i, hack := range d.Hacks {
|
||||
if err := hack(dcp); err != nil {
|
||||
log.Printf("Error with Hack[%d] for %s: %v", i, d.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
}
|
||||
if err := dcp.writePackage(outDir, useGofmt); err != nil {
|
||||
log.Printf("Error writing package %q: %v", dcp.Metadata.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fallbackStrValue(defaultValue string, values ...string) string {
|
||||
for _, v := range values {
|
||||
if v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func acquireFile(specFilename string, xmlSpecURL string) error {
|
||||
if f, err := os.Open(specFilename); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
f.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
resp, err := http.Get(xmlSpecURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("could not download spec %q from %q: ",
|
||||
specFilename, xmlSpecURL, resp.Status)
|
||||
}
|
||||
|
||||
tmpFilename := specFilename + ".download"
|
||||
w, err := os.Create(tmpFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
_, err = io.Copy(w, resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Rename(tmpFilename, specFilename)
|
||||
}
|
||||
|
||||
// DCP collects together information about a UPnP Device Control Protocol.
|
||||
type DCP struct {
|
||||
Metadata DCPMetadata
|
||||
DeviceTypes map[string]*URNParts
|
||||
ServiceTypes map[string]*URNParts
|
||||
Services []SCPDWithURN
|
||||
}
|
||||
|
||||
func newDCP(metadata DCPMetadata) *DCP {
|
||||
return &DCP{
|
||||
Metadata: metadata,
|
||||
DeviceTypes: make(map[string]*URNParts),
|
||||
ServiceTypes: make(map[string]*URNParts),
|
||||
}
|
||||
}
|
||||
|
||||
func (dcp *DCP) processZipFile(filename string) error {
|
||||
archive, err := zip.OpenReader(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading zip file %q: %v", filename, err)
|
||||
}
|
||||
defer archive.Close()
|
||||
for _, deviceFile := range globFiles("*/device/*.xml", archive) {
|
||||
if err := dcp.processDeviceFile(deviceFile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, scpdFile := range globFiles("*/service/*.xml", archive) {
|
||||
if err := dcp.processSCPDFile(scpdFile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dcp *DCP) processDeviceFile(file *zip.File) error {
|
||||
var device goupnp.Device
|
||||
if err := unmarshalXmlFile(file, &device); err != nil {
|
||||
return fmt.Errorf("error decoding device XML from file %q: %v", file.Name, err)
|
||||
}
|
||||
var mainErr error
|
||||
device.VisitDevices(func(d *goupnp.Device) {
|
||||
t := strings.TrimSpace(d.DeviceType)
|
||||
if t != "" {
|
||||
u, err := extractURNParts(t, deviceURNPrefix)
|
||||
if err != nil {
|
||||
mainErr = err
|
||||
}
|
||||
dcp.DeviceTypes[t] = u
|
||||
}
|
||||
})
|
||||
device.VisitServices(func(s *goupnp.Service) {
|
||||
u, err := extractURNParts(s.ServiceType, serviceURNPrefix)
|
||||
if err != nil {
|
||||
mainErr = err
|
||||
}
|
||||
dcp.ServiceTypes[s.ServiceType] = u
|
||||
})
|
||||
return mainErr
|
||||
}
|
||||
|
||||
func (dcp *DCP) writePackage(outDir string, useGofmt bool) error {
|
||||
packageDirname := filepath.Join(outDir, dcp.Metadata.Name)
|
||||
err := os.MkdirAll(packageDirname, os.ModePerm)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
packageFilename := filepath.Join(packageDirname, dcp.Metadata.Name+".go")
|
||||
packageFile, err := os.Create(packageFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var output io.WriteCloser = packageFile
|
||||
if useGofmt {
|
||||
if output, err = codegen.NewGofmtWriteCloser(output); err != nil {
|
||||
packageFile.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = packageTmpl.Execute(output, dcp); err != nil {
|
||||
output.Close()
|
||||
return err
|
||||
}
|
||||
return output.Close()
|
||||
}
|
||||
|
||||
func (dcp *DCP) processSCPDFile(file *zip.File) error {
|
||||
scpd := new(scpd.SCPD)
|
||||
if err := unmarshalXmlFile(file, scpd); err != nil {
|
||||
return fmt.Errorf("error decoding SCPD XML from file %q: %v", file.Name, err)
|
||||
}
|
||||
scpd.Clean()
|
||||
urnParts, err := urnPartsFromSCPDFilename(file.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not recognize SCPD filename %q: %v", file.Name, err)
|
||||
}
|
||||
dcp.Services = append(dcp.Services, SCPDWithURN{
|
||||
URNParts: urnParts,
|
||||
SCPD: scpd,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
type SCPDWithURN struct {
|
||||
*URNParts
|
||||
SCPD *scpd.SCPD
|
||||
}
|
||||
|
||||
func (s *SCPDWithURN) WrapArguments(args []*scpd.Argument) (argumentWrapperList, error) {
|
||||
wrappedArgs := make(argumentWrapperList, len(args))
|
||||
for i, arg := range args {
|
||||
wa, err := s.wrapArgument(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wrappedArgs[i] = wa
|
||||
}
|
||||
return wrappedArgs, nil
|
||||
}
|
||||
|
||||
func (s *SCPDWithURN) wrapArgument(arg *scpd.Argument) (*argumentWrapper, error) {
|
||||
relVar := s.SCPD.GetStateVariable(arg.RelatedStateVariable)
|
||||
if relVar == nil {
|
||||
return nil, fmt.Errorf("no such state variable: %q, for argument %q", arg.RelatedStateVariable, arg.Name)
|
||||
}
|
||||
cnv, ok := typeConvs[relVar.DataType.Name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown data type: %q, for state variable %q, for argument %q", relVar.DataType.Type, arg.RelatedStateVariable, arg.Name)
|
||||
}
|
||||
return &argumentWrapper{
|
||||
Argument: *arg,
|
||||
relVar: relVar,
|
||||
conv: cnv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type argumentWrapper struct {
|
||||
scpd.Argument
|
||||
relVar *scpd.StateVariable
|
||||
conv conv
|
||||
}
|
||||
|
||||
func (arg *argumentWrapper) AsParameter() string {
|
||||
return fmt.Sprintf("%s %s", arg.Name, arg.conv.ExtType)
|
||||
}
|
||||
|
||||
func (arg *argumentWrapper) HasDoc() bool {
|
||||
rng := arg.relVar.AllowedValueRange
|
||||
return ((rng != nil && (rng.Minimum != "" || rng.Maximum != "" || rng.Step != "")) ||
|
||||
len(arg.relVar.AllowedValues) > 0)
|
||||
}
|
||||
|
||||
func (arg *argumentWrapper) Document() string {
|
||||
relVar := arg.relVar
|
||||
if rng := relVar.AllowedValueRange; rng != nil {
|
||||
var parts []string
|
||||
if rng.Minimum != "" {
|
||||
parts = append(parts, fmt.Sprintf("minimum=%s", rng.Minimum))
|
||||
}
|
||||
if rng.Maximum != "" {
|
||||
parts = append(parts, fmt.Sprintf("maximum=%s", rng.Maximum))
|
||||
}
|
||||
if rng.Step != "" {
|
||||
parts = append(parts, fmt.Sprintf("step=%s", rng.Step))
|
||||
}
|
||||
return "allowed value range: " + strings.Join(parts, ", ")
|
||||
}
|
||||
if len(relVar.AllowedValues) != 0 {
|
||||
return "allowed values: " + strings.Join(relVar.AllowedValues, ", ")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (arg *argumentWrapper) Marshal() string {
|
||||
return fmt.Sprintf("soap.Marshal%s(%s)", arg.conv.FuncSuffix, arg.Name)
|
||||
}
|
||||
|
||||
func (arg *argumentWrapper) Unmarshal(objVar string) string {
|
||||
return fmt.Sprintf("soap.Unmarshal%s(%s.%s)", arg.conv.FuncSuffix, objVar, arg.Name)
|
||||
}
|
||||
|
||||
type argumentWrapperList []*argumentWrapper
|
||||
|
||||
func (args argumentWrapperList) HasDoc() bool {
|
||||
for _, arg := range args {
|
||||
if arg.HasDoc() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type conv struct {
|
||||
FuncSuffix string
|
||||
ExtType string
|
||||
}
|
||||
|
||||
// typeConvs maps from a SOAP type (e.g. "fixed.14.4") to the function name
|
||||
// suffix inside the soap module (e.g. "Fixed14_4") and the Go type.
|
||||
var typeConvs = map[string]conv{
|
||||
"ui1": conv{"Ui1", "uint8"},
|
||||
"ui2": conv{"Ui2", "uint16"},
|
||||
"ui4": conv{"Ui4", "uint32"},
|
||||
"i1": conv{"I1", "int8"},
|
||||
"i2": conv{"I2", "int16"},
|
||||
"i4": conv{"I4", "int32"},
|
||||
"int": conv{"Int", "int64"},
|
||||
"r4": conv{"R4", "float32"},
|
||||
"r8": conv{"R8", "float64"},
|
||||
"number": conv{"R8", "float64"}, // Alias for r8.
|
||||
"fixed.14.4": conv{"Fixed14_4", "float64"},
|
||||
"float": conv{"R8", "float64"},
|
||||
"char": conv{"Char", "rune"},
|
||||
"string": conv{"String", "string"},
|
||||
"date": conv{"Date", "time.Time"},
|
||||
"dateTime": conv{"DateTime", "time.Time"},
|
||||
"dateTime.tz": conv{"DateTimeTz", "time.Time"},
|
||||
"time": conv{"TimeOfDay", "soap.TimeOfDay"},
|
||||
"time.tz": conv{"TimeOfDayTz", "soap.TimeOfDay"},
|
||||
"boolean": conv{"Boolean", "bool"},
|
||||
"bin.base64": conv{"BinBase64", "[]byte"},
|
||||
"bin.hex": conv{"BinHex", "[]byte"},
|
||||
"uri": conv{"URI", "*url.URL"},
|
||||
}
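// For illustration (see argumentWrapper.Marshal/Unmarshal below): a "fixed.14.4"
// argument named Rate (a made-up name) would be emitted in generated code as
// soap.MarshalFixed14_4(Rate) on the way out, and read back as
// soap.UnmarshalFixed14_4(response.Rate).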
|
||||
|
||||
func globFiles(pattern string, archive *zip.ReadCloser) []*zip.File {
|
||||
var files []*zip.File
|
||||
for _, f := range archive.File {
|
||||
if matched, err := path.Match(pattern, f.Name); err != nil {
|
||||
// This shouldn't happen - all patterns are hard-coded, errors in them
|
||||
// are a programming error.
|
||||
panic(err)
|
||||
} else if matched {
|
||||
files = append(files, f)
|
||||
}
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func unmarshalXmlFile(file *zip.File, data interface{}) error {
|
||||
r, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close() // close only after decoding has finished
|
||||
decoder := xml.NewDecoder(r)
|
||||
return decoder.Decode(data)
|
||||
}
|
||||
|
||||
type URNParts struct {
|
||||
URN string
|
||||
Name string
|
||||
Version string
|
||||
}
|
||||
|
||||
func (u *URNParts) Const() string {
|
||||
return fmt.Sprintf("URN_%s_%s", u.Name, u.Version)
|
||||
}
|
||||
|
||||
// extractURNParts extracts the name and version from a URN string.
|
||||
func extractURNParts(urn, expectedPrefix string) (*URNParts, error) {
|
||||
if !strings.HasPrefix(urn, expectedPrefix) {
|
||||
return nil, fmt.Errorf("%q does not have expected prefix %q", urn, expectedPrefix)
|
||||
}
|
||||
parts := strings.SplitN(strings.TrimPrefix(urn, expectedPrefix), ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("%q does not have a name and version", urn)
|
||||
}
|
||||
name, version := parts[0], parts[1]
|
||||
return &URNParts{urn, name, version}, nil
|
||||
}
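// For example (illustrative URN only), extractURNParts("urn:schemas-upnp-org:service:WANIPConnection:1",
// serviceURNPrefix) returns &URNParts{Name: "WANIPConnection", Version: "1"}, with URN set to the full input.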
|
||||
|
||||
var scpdFilenameRe = regexp.MustCompile(
|
||||
`.*/([a-zA-Z0-9]+)([0-9]+)\.xml`)
|
||||
|
||||
func urnPartsFromSCPDFilename(filename string) (*URNParts, error) {
|
||||
parts := scpdFilenameRe.FindStringSubmatch(filename)
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("SCPD filename %q does not have expected number of parts", filename)
|
||||
}
|
||||
name, version := parts[1], parts[2]
|
||||
return &URNParts{
|
||||
URN: serviceURNPrefix + name + ":" + version,
|
||||
Name: name,
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var packageTmpl = template.Must(template.New("package").Parse(`{{$name := .Metadata.Name}}
|
||||
// Client for UPnP Device Control Protocol {{.Metadata.OfficialName}}.
|
||||
// {{if .Metadata.DocURL}}
|
||||
// This DCP is documented in detail at: {{.Metadata.DocURL}}{{end}}
|
||||
//
|
||||
// Typically, use one of the New* functions to create clients for services.
|
||||
package {{$name}}
|
||||
|
||||
// Generated file - do not edit by hand. See README.md
|
||||
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/huin/goupnp"
|
||||
"github.com/huin/goupnp/soap"
|
||||
)
|
||||
|
||||
// Hack to avoid Go complaining if time isn't used.
|
||||
var _ time.Time
|
||||
|
||||
// Device URNs:
|
||||
const ({{range .DeviceTypes}}
|
||||
{{.Const}} = "{{.URN}}"{{end}}
|
||||
)
|
||||
|
||||
// Service URNs:
|
||||
const ({{range .ServiceTypes}}
|
||||
{{.Const}} = "{{.URN}}"{{end}}
|
||||
)
|
||||
|
||||
{{range .Services}}
|
||||
{{$srv := .}}
|
||||
{{$srvIdent := printf "%s%s" .Name .Version}}
|
||||
|
||||
// {{$srvIdent}} is a client for UPnP SOAP service with URN "{{.URN}}". See
|
||||
// goupnp.ServiceClient, which contains RootDevice and Service attributes which
|
||||
// are provided for informational value.
|
||||
type {{$srvIdent}} struct {
|
||||
goupnp.ServiceClient
|
||||
}
|
||||
|
||||
// New{{$srvIdent}}Clients discovers instances of the service on the network,
|
||||
// and returns clients to any that are found. errors will contain an error for
|
||||
// any devices that replied but which could not be queried, and err will be set
|
||||
// if the discovery process failed outright.
|
||||
//
|
||||
// This is a typical entry calling point into this package.
|
||||
func New{{$srvIdent}}Clients() (clients []*{{$srvIdent}}, errors []error, err error) {
|
||||
var genericClients []goupnp.ServiceClient
|
||||
if genericClients, errors, err = goupnp.NewServiceClients({{$srv.Const}}); err != nil {
|
||||
return
|
||||
}
|
||||
clients = new{{$srvIdent}}ClientsFromGenericClients(genericClients)
|
||||
return
|
||||
}
|
||||
|
||||
// New{{$srvIdent}}ClientsByURL discovers instances of the service at the given
|
||||
// URL, and returns clients to any that are found. An error is returned if
|
||||
// there was an error probing the service.
|
||||
//
|
||||
// This is a typical entry calling point into this package when reusing a
|
||||
// previously discovered service URL.
|
||||
func New{{$srvIdent}}ClientsByURL(loc *url.URL) ([]*{{$srvIdent}}, error) {
|
||||
genericClients, err := goupnp.NewServiceClientsByURL(loc, {{$srv.Const}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return new{{$srvIdent}}ClientsFromGenericClients(genericClients), nil
|
||||
}
|
||||
|
||||
// New{{$srvIdent}}ClientsFromRootDevice discovers instances of the service in
|
||||
// a given root device, and returns clients to any that are found. An error is
|
||||
// returned if there was not at least one instance of the service within the
|
||||
// device. The location parameter is simply assigned to the Location attribute
|
||||
// of the wrapped ServiceClient(s).
|
||||
//
|
||||
// This is a typical entry calling point into this package when reusing a
|
||||
// previously discovered root device.
|
||||
func New{{$srvIdent}}ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*{{$srvIdent}}, error) {
|
||||
genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, {{$srv.Const}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return new{{$srvIdent}}ClientsFromGenericClients(genericClients), nil
|
||||
}
|
||||
|
||||
func new{{$srvIdent}}ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*{{$srvIdent}} {
|
||||
clients := make([]*{{$srvIdent}}, len(genericClients))
|
||||
for i := range genericClients {
|
||||
clients[i] = &{{$srvIdent}}{genericClients[i]}
|
||||
}
|
||||
return clients
|
||||
}
|
||||
|
||||
{{range .SCPD.Actions}}{{/* loops over *SCPDWithURN values */}}
|
||||
|
||||
{{$winargs := $srv.WrapArguments .InputArguments}}
|
||||
{{$woutargs := $srv.WrapArguments .OutputArguments}}
|
||||
{{if $winargs.HasDoc}}
|
||||
//
|
||||
// Arguments:{{range $winargs}}{{if .HasDoc}}
|
||||
//
|
||||
// * {{.Name}}: {{.Document}}{{end}}{{end}}{{end}}
|
||||
{{if $woutargs.HasDoc}}
|
||||
//
|
||||
// Return values:{{range $woutargs}}{{if .HasDoc}}
|
||||
//
|
||||
// * {{.Name}}: {{.Document}}{{end}}{{end}}{{end}}
|
||||
func (client *{{$srvIdent}}) {{.Name}}({{range $winargs}}{{/*
|
||||
*/}}{{.AsParameter}}, {{end}}{{/*
|
||||
*/}}) ({{range $woutargs}}{{/*
|
||||
*/}}{{.AsParameter}}, {{end}} err error) {
|
||||
// Request structure.
|
||||
request := {{if $winargs}}&{{template "argstruct" $winargs}}{{"{}"}}{{else}}{{"interface{}(nil)"}}{{end}}
|
||||
// BEGIN Marshal arguments into request.
|
||||
{{range $winargs}}
|
||||
if request.{{.Name}}, err = {{.Marshal}}; err != nil {
|
||||
return
|
||||
}{{end}}
|
||||
// END Marshal arguments into request.
|
||||
|
||||
// Response structure.
|
||||
response := {{if $woutargs}}&{{template "argstruct" $woutargs}}{{"{}"}}{{else}}{{"interface{}(nil)"}}{{end}}
|
||||
|
||||
// Perform the SOAP call.
|
||||
if err = client.SOAPClient.PerformAction({{$srv.URNParts.Const}}, "{{.Name}}", request, response); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// BEGIN Unmarshal arguments from response.
|
||||
{{range $woutargs}}
|
||||
if {{.Name}}, err = {{.Unmarshal "response"}}; err != nil {
|
||||
return
|
||||
}{{end}}
|
||||
// END Unmarshal arguments from response.
|
||||
return
|
||||
}
|
||||
{{end}}{{/* range .SCPD.Actions */}}
|
||||
{{end}}{{/* range .Services */}}
|
||||
|
||||
{{define "argstruct"}}struct {{"{"}}{{range .}}
|
||||
{{.Name}} string
|
||||
{{end}}{{"}"}}{{end}}
|
||||
`))
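For orientation, clients emitted by this template are typically used along the following lines. This is a minimal sketch against the generated internetgateway1 package listed in dcpMetadata above; the WANIPConnection1 service and its GetExternalIPAddress action are used purely as illustrative examples of generated output, not as part of this tool.

package main

import (
	"fmt"
	"log"

	"github.com/huin/goupnp/dcps/internetgateway1"
)

func main() {
	// Discover all WANIPConnection:1 services on the local network.
	clients, errs, err := internetgateway1.NewWANIPConnection1Clients()
	if err != nil {
		log.Fatal(err) // discovery itself failed
	}
	for _, e := range errs {
		log.Println("device replied but could not be queried:", e)
	}
	for _, c := range clients {
		// GetExternalIPAddress is one of the SOAP actions generated from the SCPD.
		ip, err := c.GetExternalIPAddress()
		if err != nil {
			log.Println(err)
			continue
		}
		fmt.Println("external IP:", ip)
	}
}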
|
|
@ -1,27 +0,0 @@
|
|||
// Copyright (c) 2010 Jack Palevich. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,7 +0,0 @@
|
|||
# gateway
|
||||
|
||||
A very simple library for discovering the IP address of the local LAN gateway.
|
||||
|
||||
Provides implementations for Linux, OS X (Darwin) and Windows.
|
||||
|
||||
Pull requests for other OSs happily considered!
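
A minimal usage sketch, assuming this vendored copy corresponds to the upstream package github.com/jackpal/gateway (the import path is an assumption):

package main

import (
	"fmt"
	"log"

	"github.com/jackpal/gateway" // assumed upstream import path of this package
)

func main() {
	ip, err := gateway.DiscoverGateway()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("LAN gateway:", ip)
}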
|
|
@ -1,40 +0,0 @@
|
|||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "-n", "get", "0.0.0.0")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Darwin route out format is always like this:
|
||||
// route to: default
|
||||
// destination: default
|
||||
// mask: default
|
||||
// gateway: 192.168.1.1
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("gateway:")) {
|
||||
gatewayFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(gatewayFields[1]))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func discoverGatewayUsingIp() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("ip", "route", "show")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Linux 'ip route show' format looks like this:
|
||||
// default via 192.168.178.1 dev wlp3s0 metric 303
|
||||
// 192.168.178.0/24 dev wlp3s0 proto kernel scope link src 192.168.178.76 metric 303
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("default")) {
|
||||
ipFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(ipFields[2]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func discoverGatewayUsingRoute() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "-n")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Linux route out format is always like this:
|
||||
// Kernel IP routing table
|
||||
// Destination Gateway Genmask Flags Metric Ref Use Iface
|
||||
// 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 eth0
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("0.0.0.0")) {
|
||||
ipFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(ipFields[1]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
ip, err = discoverGatewayUsingRoute()
|
||||
if err != nil {
|
||||
ip, err = discoverGatewayUsingIp()
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
// +build !darwin,!linux,!windows
|
||||
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
err = fmt.Errorf("DiscoverGateway not implemented for OS %s", runtime.GOOS)
|
||||
return
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "print", "0.0.0.0")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Windows route output format is always like this:
|
||||
// ===========================================================================
|
||||
// Active Routes:
|
||||
// Network Destination Netmask Gateway Interface Metric
|
||||
// 0.0.0.0 0.0.0.0 192.168.1.1 192.168.1.100 20
|
||||
// ===========================================================================
|
||||
// Find the "Active Routes:" header, skip the column-header line,
|
||||
// and take the third field of the following route line (the gateway address).
|
||||
// No regex is used because the output format is stable from Windows XP through 8 (NEEDS TESTING)
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for idx, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("Active Routes:")) {
|
||||
ipFields := bytes.Fields(outputLines[idx+2])
|
||||
ip = net.ParseIP(string(ipFields[2]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
20
Godeps/_workspace/src/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go
generated
vendored
|
@ -1,20 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
r := metrics.NewRegistry()
|
||||
for i := 0; i < 10000; i++ {
|
||||
r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter())
|
||||
r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge())
|
||||
r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64())
|
||||
r.Register(fmt.Sprintf("histogram-uniform-%d", i), metrics.NewHistogram(metrics.NewUniformSample(1028)))
|
||||
r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))
|
||||
r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter())
|
||||
}
|
||||
time.Sleep(600e9)
|
||||
}
|
154
Godeps/_workspace/src/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go
generated
vendored
|
@ -1,154 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
// "github.com/rcrowley/go-metrics/stathat"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
// "syslog"
|
||||
"time"
|
||||
)
|
||||
|
||||
const fanout = 10
|
||||
|
||||
func main() {
|
||||
|
||||
r := metrics.NewRegistry()
|
||||
|
||||
c := metrics.NewCounter()
|
||||
r.Register("foo", c)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
c.Dec(19)
|
||||
time.Sleep(300e6)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
c.Inc(47)
|
||||
time.Sleep(400e6)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
g := metrics.NewGauge()
|
||||
r.Register("bar", g)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
g.Update(19)
|
||||
time.Sleep(300e6)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
g.Update(47)
|
||||
time.Sleep(400e6)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
gf := metrics.NewGaugeFloat64()
|
||||
r.Register("barfloat64", gf)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
gf.Update(19.0)
|
||||
time.Sleep(300e6)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
gf.Update(47.0)
|
||||
time.Sleep(400e6)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
|
||||
if 0 < rand.Intn(2) {
|
||||
h.Healthy()
|
||||
} else {
|
||||
h.Unhealthy(errors.New("baz"))
|
||||
}
|
||||
})
|
||||
r.Register("baz", hc)
|
||||
|
||||
s := metrics.NewExpDecaySample(1028, 0.015)
|
||||
//s := metrics.NewUniformSample(1028)
|
||||
h := metrics.NewHistogram(s)
|
||||
r.Register("bang", h)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
h.Update(19)
|
||||
time.Sleep(300e6)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
h.Update(47)
|
||||
time.Sleep(400e6)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
m := metrics.NewMeter()
|
||||
r.Register("quux", m)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
m.Mark(19)
|
||||
time.Sleep(300e6)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
m.Mark(47)
|
||||
time.Sleep(400e6)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
t := metrics.NewTimer()
|
||||
r.Register("hooah", t)
|
||||
for i := 0; i < fanout; i++ {
|
||||
go func() {
|
||||
for {
|
||||
t.Time(func() { time.Sleep(300e6) })
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
t.Time(func() { time.Sleep(400e6) })
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
metrics.RegisterDebugGCStats(r)
|
||||
go metrics.CaptureDebugGCStats(r, 5e9)
|
||||
|
||||
metrics.RegisterRuntimeMemStats(r)
|
||||
go metrics.CaptureRuntimeMemStats(r, 5e9)
|
||||
|
||||
metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
|
||||
|
||||
/*
|
||||
w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
|
||||
if nil != err { log.Fatalln(err) }
|
||||
metrics.Syslog(r, 60e9, w)
|
||||
*/
|
||||
|
||||
/*
|
||||
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
|
||||
metrics.Graphite(r, 10e9, "metrics", addr)
|
||||
*/
|
||||
|
||||
/*
|
||||
stathat.Stathat(r, 10e9, "example@example.com")
|
||||
*/
|
||||
|
||||
}
|
22
Godeps/_workspace/src/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
)
|
||||
|
||||
func main() {
|
||||
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
if nil != err {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
log.Println("listening", l.Addr())
|
||||
for {
|
||||
c, err := l.AcceptTCP()
|
||||
if nil != err {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
log.Println("accepted", c.RemoteAddr())
|
||||
}
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
package librato
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const Operations = "operations"
|
||||
const OperationsShort = "ops"
|
||||
|
||||
type LibratoClient struct {
|
||||
Email, Token string
|
||||
}
|
||||
|
||||
// property strings
|
||||
const (
|
||||
// display attributes
|
||||
Color = "color"
|
||||
DisplayMax = "display_max"
|
||||
DisplayMin = "display_min"
|
||||
DisplayUnitsLong = "display_units_long"
|
||||
DisplayUnitsShort = "display_units_short"
|
||||
DisplayStacked = "display_stacked"
|
||||
DisplayTransform = "display_transform"
|
||||
// special gauge display attributes
|
||||
SummarizeFunction = "summarize_function"
|
||||
Aggregate = "aggregate"
|
||||
|
||||
// metric keys
|
||||
Name = "name"
|
||||
Period = "period"
|
||||
Description = "description"
|
||||
DisplayName = "display_name"
|
||||
Attributes = "attributes"
|
||||
|
||||
// measurement keys
|
||||
MeasureTime = "measure_time"
|
||||
Source = "source"
|
||||
Value = "value"
|
||||
|
||||
// special gauge keys
|
||||
Count = "count"
|
||||
Sum = "sum"
|
||||
Max = "max"
|
||||
Min = "min"
|
||||
SumSquares = "sum_squares"
|
||||
|
||||
// batch keys
|
||||
Counters = "counters"
|
||||
Gauges = "gauges"
|
||||
|
||||
MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
|
||||
)
|
||||
|
||||
type Measurement map[string]interface{}
|
||||
type Metric map[string]interface{}
|
||||
|
||||
type Batch struct {
|
||||
Gauges []Measurement `json:"gauges,omitempty"`
|
||||
Counters []Measurement `json:"counters,omitempty"`
|
||||
MeasureTime int64 `json:"measure_time"`
|
||||
Source string `json:"source"`
|
||||
}
|
||||
|
||||
func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
|
||||
var (
|
||||
js []byte
|
||||
req *http.Request
|
||||
resp *http.Response
|
||||
)
|
||||
|
||||
if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if js, err = json.Marshal(batch); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.SetBasicAuth(self.Email, self.Token)
|
||||
|
||||
if resp, err = http.DefaultClient.Do(req); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
var body []byte
|
||||
if body, err = ioutil.ReadAll(resp.Body); err != nil {
|
||||
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
|
||||
}
|
||||
err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
|
||||
}
|
||||
return
|
||||
}
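
For orientation, a direct use of this client looks roughly like the following. This is a sketch only: the import path assumes the pre-deprecation location github.com/rcrowley/go-metrics/librato, and the credentials, source, and metric name are placeholders.

package main

import (
	"log"
	"time"

	"github.com/rcrowley/go-metrics/librato" // assumed pre-deprecation import path
)

func main() {
	// Placeholder credentials; Librato expects an account email plus API token.
	client := &librato.LibratoClient{Email: "ops@example.com", Token: "api-token"}
	batch := librato.Batch{
		MeasureTime: time.Now().Unix(),
		Source:      "host-1",
		Gauges: []librato.Measurement{
			{librato.Name: "example.gauge", librato.Value: 42.0, librato.Period: 60.0},
		},
	}
	if err := client.PostMetrics(batch); err != nil {
		log.Println("librato post failed:", err)
	}
}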
|
|
@ -1,235 +0,0 @@
|
|||
package librato
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// a regexp for extracting the unit from time.Duration.String
|
||||
var unitRegexp = regexp.MustCompile("[^\\d]+$")
|
||||
|
||||
// a helper that turns a time.Duration into librato display attributes for timer metrics
|
||||
func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
|
||||
attrs = make(map[string]interface{})
|
||||
attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
|
||||
attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
|
||||
return
|
||||
}
|
||||
|
||||
type Reporter struct {
|
||||
Email, Token string
|
||||
Namespace string
|
||||
Source string
|
||||
Interval time.Duration
|
||||
Registry metrics.Registry
|
||||
Percentiles []float64 // percentiles to report on histogram metrics
|
||||
TimerAttributes map[string]interface{} // units in which timers will be displayed
|
||||
intervalSec int64
|
||||
}
|
||||
|
||||
func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
|
||||
return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
|
||||
}
|
||||
|
||||
func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
|
||||
NewReporter(r, d, e, t, s, p, u).Run()
|
||||
}
|
||||
|
||||
func (self *Reporter) Run() {
|
||||
log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
|
||||
ticker := time.Tick(self.Interval)
|
||||
metricsApi := &LibratoClient{self.Email, self.Token}
|
||||
for now := range ticker {
|
||||
var metrics Batch
|
||||
var err error
|
||||
if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
|
||||
log.Printf("ERROR constructing librato request body %s", err)
|
||||
continue
|
||||
}
|
||||
if err := metricsApi.PostMetrics(metrics); err != nil {
|
||||
log.Printf("ERROR sending metrics to librato %s", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calculate sum of squares from data provided by metrics.Histogram
|
||||
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
|
||||
func sumSquares(s metrics.Sample) float64 {
|
||||
count := float64(s.Count())
|
||||
sumSquared := math.Pow(count*s.Mean(), 2)
|
||||
sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
|
||||
if math.IsNaN(sumSquares) {
|
||||
return 0.0
|
||||
}
|
||||
return sumSquares
|
||||
}
|
||||
func sumSquaresTimer(t metrics.Timer) float64 {
|
||||
count := float64(t.Count())
|
||||
sumSquared := math.Pow(count*t.Mean(), 2)
|
||||
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
|
||||
if math.IsNaN(sumSquares) {
|
||||
return 0.0
|
||||
}
|
||||
return sumSquares
|
||||
}
|
||||
|
||||
func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
|
||||
snapshot = Batch{
|
||||
// coerce timestamps to a stepping fn so that they line up in Librato graphs
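// (e.g. with a 60-second interval, now.Unix() == 1003 maps to (1003/60)*60 == 960)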
|
||||
MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
|
||||
Source: self.Source,
|
||||
}
|
||||
snapshot.Gauges = make([]Measurement, 0)
|
||||
snapshot.Counters = make([]Measurement, 0)
|
||||
histogramGaugeCount := 1 + len(self.Percentiles)
|
||||
r.Each(func(name string, metric interface{}) {
|
||||
if self.Namespace != "" {
|
||||
name = fmt.Sprintf("%s.%s", self.Namespace, name)
|
||||
}
|
||||
measurement := Measurement{}
|
||||
measurement[Period] = self.Interval.Seconds()
|
||||
switch m := metric.(type) {
|
||||
case metrics.Counter:
|
||||
if m.Count() > 0 {
|
||||
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
|
||||
measurement[Value] = float64(m.Count())
|
||||
measurement[Attributes] = map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
}
|
||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
||||
}
|
||||
case metrics.Gauge:
|
||||
measurement[Name] = name
|
||||
measurement[Value] = float64(m.Value())
|
||||
snapshot.Gauges = append(snapshot.Gauges, measurement)
|
||||
case metrics.GaugeFloat64:
|
||||
measurement[Name] = name
|
||||
measurement[Value] = float64(m.Value())
|
||||
snapshot.Gauges = append(snapshot.Gauges, measurement)
|
||||
case metrics.Histogram:
|
||||
if m.Count() > 0 {
|
||||
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
|
||||
s := m.Sample()
|
||||
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
|
||||
measurement[Count] = uint64(s.Count())
|
||||
measurement[Max] = float64(s.Max())
|
||||
measurement[Min] = float64(s.Min())
|
||||
measurement[Sum] = float64(s.Sum())
|
||||
measurement[SumSquares] = sumSquares(s)
|
||||
gauges[0] = measurement
|
||||
for i, p := range self.Percentiles {
|
||||
gauges[i+1] = Measurement{
|
||||
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
|
||||
Value: s.Percentile(p),
|
||||
Period: measurement[Period],
|
||||
}
|
||||
}
|
||||
snapshot.Gauges = append(snapshot.Gauges, gauges...)
|
||||
}
|
||||
case metrics.Meter:
|
||||
measurement[Name] = name
|
||||
measurement[Value] = float64(m.Count())
|
||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
||||
snapshot.Gauges = append(snapshot.Gauges,
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "1min"),
|
||||
Value: m.Rate1(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "5min"),
|
||||
Value: m.Rate5(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "15min"),
|
||||
Value: m.Rate15(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
)
|
||||
case metrics.Timer:
|
||||
measurement[Name] = name
|
||||
measurement[Value] = float64(m.Count())
|
||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
||||
if m.Count() > 0 {
|
||||
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
|
||||
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
|
||||
gauges[0] = Measurement{
|
||||
Name: libratoName,
|
||||
Count: uint64(m.Count()),
|
||||
Sum: m.Mean() * float64(m.Count()),
|
||||
Max: float64(m.Max()),
|
||||
Min: float64(m.Min()),
|
||||
SumSquares: sumSquaresTimer(m),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: self.TimerAttributes,
|
||||
}
|
||||
for i, p := range self.Percentiles {
|
||||
gauges[i+1] = Measurement{
|
||||
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
|
||||
Value: m.Percentile(p),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: self.TimerAttributes,
|
||||
}
|
||||
}
|
||||
snapshot.Gauges = append(snapshot.Gauges, gauges...)
|
||||
snapshot.Gauges = append(snapshot.Gauges,
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
|
||||
Value: m.Rate1(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
|
||||
Value: m.Rate5(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
Measurement{
|
||||
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
|
||||
Value: m.Rate15(),
|
||||
Period: int64(self.Interval.Seconds()),
|
||||
Attributes: map[string]interface{}{
|
||||
DisplayUnitsLong: Operations,
|
||||
DisplayUnitsShort: OperationsShort,
|
||||
DisplayMin: "0",
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
return
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
// Metrics output to StatHat.
|
||||
package stathat
|
||||
|
||||
import (
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/stathat/go"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Stathat(r metrics.Registry, d time.Duration, userkey string) {
|
||||
for {
|
||||
if err := sh(r, userkey); nil != err {
|
||||
log.Println(err)
|
||||
}
|
||||
time.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
func sh(r metrics.Registry, userkey string) error {
|
||||
r.Each(func(name string, i interface{}) {
|
||||
switch metric := i.(type) {
|
||||
case metrics.Counter:
|
||||
stathat.PostEZCount(name, userkey, int(metric.Count()))
|
||||
case metrics.Gauge:
|
||||
stathat.PostEZValue(name, userkey, float64(metric.Value()))
|
||||
case metrics.GaugeFloat64:
|
||||
stathat.PostEZValue(name, userkey, float64(metric.Value()))
|
||||
case metrics.Histogram:
|
||||
h := metric.Snapshot()
|
||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||
stathat.PostEZCount(name+".count", userkey, int(h.Count()))
|
||||
stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
|
||||
stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
|
||||
stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
|
||||
stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
|
||||
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
|
||||
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
|
||||
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
|
||||
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
|
||||
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
|
||||
case metrics.Meter:
|
||||
m := metric.Snapshot()
|
||||
stathat.PostEZCount(name+".count", userkey, int(m.Count()))
|
||||
stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
|
||||
stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
|
||||
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
|
||||
stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
|
||||
case metrics.Timer:
|
||||
t := metric.Snapshot()
|
||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||
stathat.PostEZCount(name+".count", userkey, int(t.Count()))
|
||||
stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
|
||||
stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
|
||||
stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
|
||||
stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
|
||||
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
|
||||
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
|
||||
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
|
||||
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
|
||||
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
|
||||
stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
|
||||
stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
|
||||
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
|
||||
stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
|
@ -1,92 +0,0 @@
|
|||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/robertkrimen/otto/file"
|
||||
)
|
||||
|
||||
// CommentPosition determines where the comment is in a given context
|
||||
type CommentPosition int
|
||||
|
||||
const (
|
||||
_ CommentPosition = iota
|
||||
LEADING // Before the pertinent expression
|
||||
TRAILING // After the pertinent expression
|
||||
KEY // After a key or keyword
|
||||
COLON // After a colon in a field declaration
|
||||
FINAL // Final comments in a block, not belonging to a specific expression or the comment after a trailing , in an array or object literal
|
||||
TBD
|
||||
)
|
||||
|
||||
// Comment contains the data of the comment
|
||||
type Comment struct {
|
||||
Begin file.Idx
|
||||
Text string
|
||||
Position CommentPosition
|
||||
}
|
||||
|
||||
// String returns a stringified version of the position
|
||||
func (cp CommentPosition) String() string {
|
||||
switch cp {
|
||||
case LEADING:
|
||||
return "Leading"
|
||||
case TRAILING:
|
||||
return "Trailing"
|
||||
case KEY:
|
||||
return "Key"
|
||||
case COLON:
|
||||
return "Colon"
|
||||
case FINAL:
|
||||
return "Final"
|
||||
default:
|
||||
return "???"
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a stringified version of the comment
|
||||
func (c Comment) String() string {
|
||||
return fmt.Sprintf("Comment: %v", c.Text)
|
||||
}
|
||||
|
||||
// CommentMap is the data structure where all found comments are stored
|
||||
type CommentMap map[Node][]*Comment
|
||||
|
||||
// AddComment adds a single comment to the map
|
||||
func (cm CommentMap) AddComment(node Node, comment *Comment) {
|
||||
list := cm[node]
|
||||
list = append(list, comment)
|
||||
|
||||
cm[node] = list
|
||||
}
|
||||
|
||||
// AddComments adds a slice of comments, given a node and an updated position
|
||||
func (cm CommentMap) AddComments(node Node, comments []*Comment, position CommentPosition) {
|
||||
for _, comment := range comments {
|
||||
comment.Position = position
|
||||
cm.AddComment(node, comment)
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the size of the map
|
||||
func (cm CommentMap) Size() int {
|
||||
size := 0
|
||||
for _, comments := range cm {
|
||||
size += len(comments)
|
||||
}
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
// MoveComments moves comments with a given position from a node to another
|
||||
func (cm CommentMap) MoveComments(from, to Node, position CommentPosition) {
|
||||
for i, c := range cm[from] {
|
||||
if c.Position == position {
|
||||
cm.AddComment(to, c)
|
||||
|
||||
// Remove the comment from the "from" slice
|
||||
cm[from][i] = cm[from][len(cm[from])-1]
|
||||
cm[from][len(cm[from])-1] = nil
|
||||
cm[from] = cm[from][:len(cm[from])-1]
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
.PHONY: build
|
||||
|
||||
build:
|
||||
go build -a
|
||||
-gxc build-darwin-386 -a
|
|
@ -1,48 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/robertkrimen/otto"
|
||||
"github.com/robertkrimen/otto/underscore"
|
||||
)
|
||||
|
||||
var flag_underscore *bool = flag.Bool("underscore", true, "Load underscore into the runtime environment")
|
||||
|
||||
func readSource(filename string) ([]byte, error) {
|
||||
if filename == "" || filename == "-" {
|
||||
return ioutil.ReadAll(os.Stdin)
|
||||
}
|
||||
return ioutil.ReadFile(filename)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if !*flag_underscore {
|
||||
underscore.Disable()
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
src, err := readSource(flag.Arg(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vm := otto.New()
|
||||
_, err = vm.Run(src)
|
||||
return err
|
||||
}()
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case *otto.Error:
|
||||
fmt.Print(err.String())
|
||||
default:
|
||||
fmt.Println(err)
|
||||
}
|
||||
os.Exit(64)
|
||||
}
|
||||
}
|
|
@ -1,115 +0,0 @@
|
|||
// Package repl implements a REPL (read-eval-print loop) for otto.
|
||||
package repl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/robertkrimen/otto"
|
||||
"gopkg.in/readline.v1"
|
||||
)
|
||||
|
||||
var counter uint32
|
||||
|
||||
// DebuggerHandler implements otto's debugger handler signature, providing a
|
||||
// simple drop-in debugger implementation.
|
||||
func DebuggerHandler(vm *otto.Otto) {
|
||||
i := atomic.AddUint32(&counter, 1)
|
||||
|
||||
// purposefully ignoring the error here - we can't do anything useful with
|
||||
// it except panicking, and that'd be pretty rude. it'd be easy enough for a
|
||||
// consumer to define an equivalent function that _does_ panic if desired.
|
||||
_ = RunWithPrompt(vm, fmt.Sprintf("DEBUGGER[%d]>", i))
|
||||
}
|
||||
|
||||
// Run creates a REPL with the default prompt and no prelude.
|
||||
func Run(vm *otto.Otto) error {
|
||||
return RunWithPromptAndPrelude(vm, "", "")
|
||||
}
|
||||
|
||||
// RunWithPrompt runs a REPL with the given prompt and no prelude.
|
||||
func RunWithPrompt(vm *otto.Otto, prompt string) error {
|
||||
return RunWithPromptAndPrelude(vm, prompt, "")
|
||||
}
|
||||
|
||||
// RunWithPrelude runs a REPL with the default prompt and the given prelude.
|
||||
func RunWithPrelude(vm *otto.Otto, prelude string) error {
|
||||
return RunWithPromptAndPrelude(vm, "", prelude)
|
||||
}
|
||||
|
||||
// RunWithPromptAndPrelude runs a REPL with the given prompt and prelude.
|
||||
func RunWithPromptAndPrelude(vm *otto.Otto, prompt, prelude string) error {
|
||||
if prompt == "" {
|
||||
prompt = ">"
|
||||
}
|
||||
|
||||
prompt = strings.Trim(prompt, " ")
|
||||
prompt += " "
|
||||
|
||||
rl, err := readline.New(prompt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if prelude != "" {
|
||||
if _, err := io.Copy(rl.Stderr(), strings.NewReader(prelude+"\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rl.Refresh()
|
||||
}
|
||||
|
||||
var d []string
|
||||
|
||||
for {
|
||||
l, err := rl.Readline()
|
||||
if err != nil {
|
||||
if err == readline.ErrInterrupt {
|
||||
if d != nil {
|
||||
d = nil
|
||||
|
||||
rl.SetPrompt(prompt)
|
||||
rl.Refresh()
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if l == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
d = append(d, l)
|
||||
|
||||
s, err := vm.Compile("repl", strings.Join(d, "\n"))
|
||||
if err != nil {
|
||||
rl.SetPrompt(strings.Repeat(" ", len(prompt)))
|
||||
} else {
|
||||
rl.SetPrompt(prompt)
|
||||
|
||||
d = nil
|
||||
|
||||
v, err := vm.Eval(s)
|
||||
if err != nil {
|
||||
if oerr, ok := err.(*otto.Error); ok {
|
||||
io.Copy(rl.Stdout(), strings.NewReader(oerr.String()))
|
||||
} else {
|
||||
io.Copy(rl.Stdout(), strings.NewReader(err.Error()))
|
||||
}
|
||||
} else {
|
||||
rl.Stdout().Write([]byte(v.String() + "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
rl.Refresh()
|
||||
}
|
||||
|
||||
return rl.Close()
|
||||
}
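
Wiring this REPL into a program is a one-liner; a minimal sketch, assuming the standard otto import paths shown in this package's own imports:

package main

import (
	"log"

	"github.com/robertkrimen/otto"
	"github.com/robertkrimen/otto/repl"
)

func main() {
	vm := otto.New()
	// Run starts a read-eval-print loop on the given otto VM with the default prompt.
	if err := repl.Run(vm); err != nil {
		log.Fatal(err)
	}
}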
|
|
@ -1,669 +0,0 @@
|
|||
// This file was AUTOMATICALLY GENERATED by terst-import (smuggol) from github.com/robertkrimen/terst
|
||||
|
||||
/*
|
||||
Package terst is a terse (terst = test + terse), easy-to-use testing library for Go.
|
||||
|
||||
terst is compatible with (and works via) the standard testing package: http://golang.org/pkg/testing
|
||||
|
||||
var is = terst.Is
|
||||
|
||||
func Test(t *testing.T) {
|
||||
terst.Terst(t, func() {
|
||||
is("abc", "abc")
|
||||
|
||||
is(1, ">", 0)
|
||||
|
||||
var abc []int
|
||||
is(abc, nil)
|
||||
}
|
||||
}
|
||||
|
||||
Do not import terst directly, instead use `terst-import` to copy it into your testing environment:
|
||||
|
||||
https://github.com/robertkrimen/terst/tree/master/terst-import
|
||||
|
||||
$ go get github.com/robertkrimen/terst/terst-import
|
||||
|
||||
$ terst-import
|
||||
|
||||
*/
|
||||
package terst
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Is compares two values (got & expect) and returns true if the comparison is true,
|
||||
// false otherwise. In addition, if the comparison is false, Is will report the error
|
||||
// in a manner similar to testing.T.Error(...). Is also takes an optional argument,
|
||||
// a comparator, that changes how the comparison is made. The following
|
||||
// comparators are available:
|
||||
//
|
||||
// == # got == expect (default)
|
||||
// != # got != expect
|
||||
//
|
||||
// > # got > expect (float32, uint, uint16, int, int64, ...)
|
||||
// >= # got >= expect
|
||||
// < # got < expect
|
||||
// <= # got <= expect
|
||||
//
|
||||
// =~ # regexp.MustCompile(expect).Match{String}(got)
|
||||
// !~ # !regexp.MustCompile(expect).Match{String}(got)
|
||||
//
|
||||
// Basic usage with the default comparator (==):
|
||||
//
|
||||
// Is(<got>, <expect>)
|
||||
//
|
||||
// Specifying a different comparator:
|
||||
//
|
||||
// Is(<got>, <comparator>, <expect>)
|
||||
//
|
||||
// A simple comparison:
|
||||
//
|
||||
// Is(2 + 2, 4)
|
||||
//
|
||||
// A bit trickier:
|
||||
//
|
||||
// Is(1, ">", 0)
|
||||
// Is(2 + 2, "!=", 5)
|
||||
// Is("Nothing happens.", "=~", `ing(\s+)happens\.$`)
|
||||
//
|
||||
// Is should only be called under a Terst(t, ...) call. For a standalone version,
|
||||
// use IsErr. If no scope is found and the comparison is false, then Is will panic the error.
|
||||
//
|
||||
func Is(arguments ...interface{}) bool {
|
||||
err := IsErr(arguments...)
|
||||
if err != nil {
|
||||
call := Caller()
|
||||
if call == nil {
|
||||
panic(err)
|
||||
}
|
||||
call.Error(err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type (
|
||||
// ErrFail indicates a comparison failure (e.g. 0 > 1).
|
||||
ErrFail error
|
||||
|
||||
// ErrInvalid indicates an invalid comparison (e.g. bool == string).
|
||||
ErrInvalid error
|
||||
)
|
||||
|
||||
var errInvalid = errors.New("invalid")
|
||||
|
||||
var registry = struct {
|
||||
table map[uintptr]*_scope
|
||||
lock sync.RWMutex
|
||||
}{
|
||||
table: map[uintptr]*_scope{},
|
||||
}
|
||||
|
||||
func registerScope(pc uintptr, scope *_scope) {
|
||||
registry.lock.Lock()
|
||||
defer registry.lock.Unlock()
|
||||
registry.table[pc] = scope
|
||||
}
|
||||
|
||||
func scope() *_scope {
|
||||
scope, _ := findScope()
|
||||
return scope
|
||||
}
|
||||
|
||||
func floatCompare(a float64, b float64) int {
|
||||
if a > b {
|
||||
return 1
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
// NaN == NaN
|
||||
return 0
|
||||
}
|
||||
|
||||
func bigIntCompare(a *big.Int, b *big.Int) int {
|
||||
return a.Cmp(b)
|
||||
}
|
||||
|
||||
func bigInt(value int64) *big.Int {
|
||||
return big.NewInt(value)
|
||||
}
|
||||
|
||||
func bigUint(value uint64) *big.Int {
|
||||
return big.NewInt(0).SetUint64(value)
|
||||
}
|
||||
|
||||
type _toString interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
func toString(value interface{}) (string, error) {
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
return value, nil
|
||||
case _toString:
|
||||
return value.String(), nil
|
||||
case error:
|
||||
return value.Error(), nil
|
||||
}
|
||||
return "", errInvalid
|
||||
}
|
||||
|
||||
func matchString(got string, expect *regexp.Regexp) (int, error) {
|
||||
if expect.MatchString(got) {
|
||||
return 0, nil
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func match(got []byte, expect *regexp.Regexp) (int, error) {
|
||||
if expect.Match(got) {
|
||||
return 0, nil
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func compareMatch(got, expect interface{}) (int, error) {
|
||||
switch got := got.(type) {
|
||||
case []byte:
|
||||
switch expect := expect.(type) {
|
||||
case string:
|
||||
matcher, err := regexp.Compile(expect)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return match(got, matcher)
|
||||
case *regexp.Regexp:
|
||||
return match(got, expect)
|
||||
}
|
||||
default:
|
||||
if got, err := toString(got); err == nil {
|
||||
switch expect := expect.(type) {
|
||||
case string:
|
||||
matcher, err := regexp.Compile(expect)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return matchString(got, matcher)
|
||||
case *regexp.Regexp:
|
||||
return matchString(got, expect)
|
||||
}
|
||||
} else {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return 0, errInvalid
|
||||
}
|
||||
|
||||
func floatPromote(value reflect.Value) (float64, error) {
|
||||
kind := value.Kind()
|
||||
if reflect.Int <= kind && kind <= reflect.Int64 {
|
||||
return float64(value.Int()), nil
|
||||
}
|
||||
if reflect.Uint <= kind && kind <= reflect.Uint64 {
|
||||
return float64(value.Uint()), nil
|
||||
}
|
||||
if reflect.Float32 <= kind && kind <= reflect.Float64 {
|
||||
return value.Float(), nil
|
||||
}
|
||||
return 0, errInvalid
|
||||
}
|
||||
|
||||
func bigIntPromote(value reflect.Value) (*big.Int, error) {
|
||||
kind := value.Kind()
|
||||
if reflect.Int <= kind && kind <= reflect.Int64 {
|
||||
return bigInt(value.Int()), nil
|
||||
}
|
||||
if reflect.Uint <= kind && kind <= reflect.Uint64 {
|
||||
return bigUint(value.Uint()), nil
|
||||
}
|
||||
return nil, errInvalid
|
||||
}
|
||||
|
||||
func compareOther(got, expect interface{}) (int, error) {
|
||||
{
|
||||
switch expect.(type) {
|
||||
case float32, float64:
|
||||
return compareNumber(got, expect)
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
return compareNumber(got, expect)
|
||||
case int, int8, int16, int32, int64:
|
||||
return compareNumber(got, expect)
|
||||
case string:
|
||||
var err error
|
||||
got, err = toString(got)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case nil:
|
||||
got := reflect.ValueOf(got)
|
||||
switch got.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface:
|
||||
if got.IsNil() {
|
||||
return 0, nil
|
||||
}
|
||||
return -1, nil
|
||||
case reflect.Invalid: // reflect.Invalid: var abc interface{} = nil
|
||||
return 0, nil
|
||||
}
|
||||
return 0, errInvalid
|
||||
}
|
||||
}
|
||||
|
||||
if reflect.ValueOf(got).Type() != reflect.ValueOf(expect).Type() {
|
||||
return 0, errInvalid
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(got, expect) {
|
||||
return 0, nil
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func compareNumber(got, expect interface{}) (int, error) {
|
||||
{
|
||||
got := reflect.ValueOf(got)
|
||||
k0 := got.Kind()
|
||||
expect := reflect.ValueOf(expect)
|
||||
k1 := expect.Kind()
|
||||
if reflect.Float32 <= k0 && k0 <= reflect.Float64 ||
|
||||
reflect.Float32 <= k1 && k1 <= reflect.Float64 {
|
||||
got, err := floatPromote(got)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
expect, err := floatPromote(expect)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return floatCompare(got, expect), nil
|
||||
} else {
|
||||
got, err := bigIntPromote(got)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
expect, err := bigIntPromote(expect)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return got.Cmp(expect), nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errInvalid
|
||||
}
|
||||
|
||||
// IsErr compares two values (got & expect) and returns nil if the comparison is true, an ErrFail if
|
||||
// the comparison is false, or an ErrInvalid if the comparison is invalid. IsErr also
|
||||
// takes an optional argument, a comparator, that changes how the comparison is made.
|
||||
//
|
||||
// Is & IsErr are similar but different:
|
||||
//
|
||||
// Is(...) // Should only be called within a Terst(...) call
|
||||
// IsErr(...) // A standalone comparator, the same as Is, just without the automatic reporting
|
||||
//
|
||||
func IsErr(arguments ...interface{}) error {
|
||||
var got, expect interface{}
|
||||
comparator := "=="
|
||||
switch len(arguments) {
|
||||
case 0, 1:
|
||||
return fmt.Errorf("invalid number of arguments to IsErr: %d", len(arguments))
|
||||
case 2:
|
||||
got, expect = arguments[0], arguments[1]
|
||||
default:
|
||||
if value, ok := arguments[1].(string); ok {
|
||||
comparator = value
|
||||
} else {
|
||||
return fmt.Errorf("invalid comparator: %v", arguments[1])
|
||||
}
|
||||
got, expect = arguments[0], arguments[2]
|
||||
}
|
||||
|
||||
var result int
|
||||
var err error
|
||||
|
||||
switch comparator {
|
||||
case "<", "<=", ">", ">=":
|
||||
result, err = compareNumber(got, expect)
|
||||
case "=~", "!~":
|
||||
result, err = compareMatch(got, expect)
|
||||
case "==", "!=":
|
||||
result, err = compareOther(got, expect)
|
||||
default:
|
||||
return fmt.Errorf("invalid comparator: %s", comparator)
|
||||
}
|
||||
|
||||
if err == errInvalid {
|
||||
return ErrInvalid(fmt.Errorf(
|
||||
"\nINVALID (%s):\n got: %v (%T)\n expected: %v (%T)",
|
||||
comparator,
|
||||
got, got,
|
||||
expect, expect,
|
||||
))
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
equality, pass := false, false
|
||||
|
||||
switch comparator {
|
||||
case "==", "=~":
|
||||
equality = true
|
||||
pass = result == 0
|
||||
case "!=", "!~":
|
||||
equality = true
|
||||
pass = result != 0
|
||||
case "<":
|
||||
pass = result < 0
|
||||
case "<=":
|
||||
pass = result <= 0
|
||||
case ">":
|
||||
pass = result > 0
|
||||
case ">=":
|
||||
pass = result >= 0
|
||||
}
|
||||
|
||||
if !pass {
|
||||
if equality {
|
||||
if comparator[1] == '~' {
|
||||
if value, ok := got.([]byte); ok {
|
||||
return ErrFail(fmt.Errorf(
|
||||
"\nFAIL (%s)\n got: %s %v%s\nexpected: %v%s",
|
||||
comparator,
|
||||
value, got, typeKindString(got),
|
||||
expect, typeKindString(expect),
|
||||
))
|
||||
}
|
||||
}
|
||||
return ErrFail(fmt.Errorf(
|
||||
"\nFAIL (%s)\n got: %v%s\nexpected: %v%s",
|
||||
comparator,
|
||||
got, typeKindString(got),
|
||||
expect, typeKindString(expect),
|
||||
))
|
||||
}
|
||||
return ErrFail(fmt.Errorf(
|
||||
"\nFAIL (%s)\n got: %v%s\nexpected: %s %v%s",
|
||||
comparator,
|
||||
got, typeKindString(got),
|
||||
comparator, expect, typeKindString(expect),
|
||||
))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
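IsErr can also be used on its own, outside of any Terst scope, whenever only the comparison logic and an inspectable error value are needed. A small self-contained sketch (the import path is again an assumption):

package main

import (
	"fmt"

	"github.com/robertkrimen/terst" // assumed import path for this package
)

func main() {
	// nil means the comparison held.
	if err := terst.IsErr(2+2, "!=", 5); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Comparing a bool against a string is an invalid comparison, so an
	// ErrInvalid-style error is returned and printed here.
	if err := terst.IsErr(true, "==", "true"); err != nil {
		fmt.Println(err)
	}
}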
|
||||
|
||||
func typeKindString(value interface{}) string {
|
||||
reflectValue := reflect.ValueOf(value)
|
||||
kind := reflectValue.Kind().String()
|
||||
result := fmt.Sprintf("%T", value)
|
||||
if kind == result {
|
||||
if kind == "string" {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(" (%T)", value)
|
||||
}
|
||||
return fmt.Sprintf(" (%T=%s)", value, kind)
|
||||
}
|
||||
|
||||
func (scope *_scope) reset() {
|
||||
scope.name = ""
|
||||
scope.output = scope.output[:0] // truncate any accumulated output, keeping the backing array
|
||||
scope.start = time.Time{}
|
||||
scope.duration = 0
|
||||
}
|
||||
|
||||
// Terst creates a testing scope, where Is can be called and errors will be reported
|
||||
// according to the top-level location of the comparison, and not where the Is call
|
||||
// actually takes place. For example:
|
||||
//
|
||||
// func test(value int) {
|
||||
// Is(value, 5) // <--- This failure is reported below.
|
||||
// }
|
||||
//
|
||||
// Terst(t, func(){
|
||||
//
|
||||
// Is(2, ">", 3) // <--- An error is reported here.
|
||||
//
|
||||
// test(5) // <--- An error is reported here.
|
||||
//
|
||||
// })
|
||||
//
|
||||
func Terst(t *testing.T, arguments ...func()) {
|
||||
scope := &_scope{
|
||||
t: t,
|
||||
}
|
||||
|
||||
pc, _, _, ok := runtime.Caller(1) // TODO Associate with the Test... func
|
||||
if !ok {
|
||||
panic("Here be dragons.")
|
||||
}
|
||||
|
||||
_, scope.testFunc = findTestFunc()
|
||||
|
||||
registerScope(pc, scope)
|
||||
|
||||
for _, fn := range arguments {
|
||||
func() {
|
||||
scope.reset()
|
||||
name := scope.testFunc.Name()
|
||||
index := strings.LastIndex(scope.testFunc.Name(), ".")
|
||||
if index >= 0 {
|
||||
name = name[index+1:] + "(Terst)"
|
||||
} else {
|
||||
name = "(Terst)"
|
||||
}
|
||||
name = "(Terst)"
|
||||
scope.name = name
|
||||
scope.start = time.Now()
|
||||
defer func() {
|
||||
scope.duration = time.Now().Sub(scope.start)
|
||||
if err := recover(); err != nil {
|
||||
scope.t.Fail()
|
||||
scope.report()
|
||||
panic(err)
|
||||
}
|
||||
scope.report()
|
||||
}()
|
||||
fn()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// From "testing"
|
||||
func (scope *_scope) report() {
|
||||
format := "~~~ %s: (Terst)\n%s"
|
||||
if scope.t.Failed() {
|
||||
fmt.Printf(format, "FAIL", scope.output)
|
||||
} else if testing.Verbose() && len(scope.output) > 0 {
|
||||
fmt.Printf(format, "PASS", scope.output)
|
||||
}
|
||||
}
|
||||
|
||||
func (scope *_scope) log(call _entry, str string) {
|
||||
scope.mu.Lock()
|
||||
defer scope.mu.Unlock()
|
||||
scope.output = append(scope.output, decorate(call, str)...)
|
||||
}
|
||||
|
||||
// decorate prefixes the string with the file and line of the call site
|
||||
// and inserts the final newline if needed and indentation tabs for formatting.
|
||||
func decorate(call _entry, s string) string {
|
||||
|
||||
file, line := call.File, call.Line
|
||||
if call.PC > 0 {
|
||||
// Truncate file name at last file name separator.
|
||||
if index := strings.LastIndex(file, "/"); index >= 0 {
|
||||
file = file[index+1:]
|
||||
} else if index = strings.LastIndex(file, "\\"); index >= 0 {
|
||||
file = file[index+1:]
|
||||
}
|
||||
} else {
|
||||
file = "???"
|
||||
line = 1
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
// Every line is indented at least one tab.
|
||||
buf.WriteByte('\t')
|
||||
fmt.Fprintf(buf, "%s:%d: ", file, line)
|
||||
lines := strings.Split(s, "\n")
|
||||
if l := len(lines); l > 1 && lines[l-1] == "" {
|
||||
lines = lines[:l-1]
|
||||
}
|
||||
for i, line := range lines {
|
||||
if i > 0 {
|
||||
// Second and subsequent lines are indented an extra tab.
|
||||
buf.WriteString("\n\t\t")
|
||||
}
|
||||
buf.WriteString(line)
|
||||
}
|
||||
buf.WriteByte('\n')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func findScope() (*_scope, _entry) {
|
||||
registry.lock.RLock()
|
||||
defer registry.lock.RUnlock()
|
||||
table := registry.table
|
||||
depth := 2 // Starting depth
|
||||
call := _entry{}
|
||||
for {
|
||||
pc, _, _, ok := runtime.Caller(depth)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if scope, exists := table[pc]; exists {
|
||||
pc, file, line, _ := runtime.Caller(depth - 3) // Terst(...) + func(){}() + fn() => ???()
|
||||
call.PC = pc
|
||||
call.File = file
|
||||
call.Line = line
|
||||
return scope, call
|
||||
}
|
||||
depth++
|
||||
}
|
||||
return nil, _entry{}
|
||||
}
|
||||
|
||||
// Call is a reference to a line immediately under a Terst testing scope.
|
||||
type Call struct {
|
||||
scope *_scope
|
||||
entry _entry
|
||||
}
|
||||
|
||||
// Caller will search the stack, looking for a Terst testing scope. If a scope
|
||||
// is found, then Caller returns a Call for logging errors, accessing testing.T, etc.
|
||||
// If no scope is found, Caller returns nil.
|
||||
func Caller() *Call {
|
||||
scope, entry := findScope()
|
||||
if scope == nil {
|
||||
return nil
|
||||
}
|
||||
return &Call{
|
||||
scope: scope,
|
||||
entry: entry,
|
||||
}
|
||||
}
|
||||
|
||||
// TestFunc returns the *runtime.Func entry for the top-level Test...(t *testing.T)
|
||||
// function.
|
||||
func (cl *Call) TestFunc() *runtime.Func {
|
||||
return cl.scope.testFunc
|
||||
}
|
||||
|
||||
// T returns the original testing.T passed to Terst(...)
|
||||
func (cl *Call) T() *testing.T {
|
||||
return cl.scope.t
|
||||
}
|
||||
|
||||
// Log is the terst version of `testing.T.Log`
|
||||
func (cl *Call) Log(arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintln(arguments...))
|
||||
}
|
||||
|
||||
// Logf is the terst version of `testing.T.Logf`
|
||||
func (cl *Call) Logf(format string, arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintf(format, arguments...))
|
||||
}
|
||||
|
||||
// Error is the terst version of `testing.T.Error`
|
||||
func (cl *Call) Error(arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintln(arguments...))
|
||||
cl.scope.t.Fail()
|
||||
}
|
||||
|
||||
// Errorf is the terst version of `testing.T.Errorf`
|
||||
func (cl *Call) Errorf(format string, arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintf(format, arguments...))
|
||||
cl.scope.t.Fail()
|
||||
}
|
||||
|
||||
// Skip is the terst version of `testing.T.Skip`
|
||||
func (cl *Call) Skip(arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintln(arguments...))
|
||||
cl.scope.t.SkipNow()
|
||||
}
|
||||
|
||||
// Skipf is the terst version of `testing.T.Skipf`
|
||||
func (cl *Call) Skipf(format string, arguments ...interface{}) {
|
||||
cl.scope.log(cl.entry, fmt.Sprintf(format, arguments...))
|
||||
cl.scope.t.SkipNow()
|
||||
}
|
||||
|
||||
type _scope struct {
|
||||
t *testing.T
|
||||
testFunc *runtime.Func
|
||||
name string
|
||||
mu sync.RWMutex
|
||||
output []byte
|
||||
start time.Time
|
||||
duration time.Duration
|
||||
}
|
||||
|
||||
type _entry struct {
|
||||
PC uintptr
|
||||
File string
|
||||
Line int
|
||||
Func *runtime.Func
|
||||
}
|
||||
|
||||
func _findFunc(match string) (_entry, *runtime.Func) {
|
||||
depth := 2 // Starting depth
|
||||
for {
|
||||
pc, file, line, ok := runtime.Caller(depth)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fn := runtime.FuncForPC(pc)
|
||||
name := fn.Name()
|
||||
if index := strings.LastIndex(name, match); index >= 0 {
|
||||
// Assume we have an instance of TestXyzzy in a _test file
|
||||
return _entry{
|
||||
PC: pc,
|
||||
File: file,
|
||||
Line: line,
|
||||
Func: fn,
|
||||
}, fn
|
||||
}
|
||||
depth++
|
||||
}
|
||||
return _entry{}, nil
|
||||
}
|
||||
|
||||
func findTestFunc() (_entry, *runtime.Func) {
|
||||
return _findFunc(".Test")
|
||||
}
|
||||
|
||||
func findTerstFunc() (_entry, *runtime.Func) {
|
||||
return _findFunc(".Terst")
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
.PHONY: test fetch clean build err report
|
||||
|
||||
TESTER := tester
|
||||
|
||||
test: $(TESTER)
|
||||
for test in test-*.js; do ./$^ -test=true $$test 1>/dev/null || exit 1; done
|
||||
@echo PASS
|
||||
|
||||
report: $(TESTER)
|
||||
./$^ -report | grep -v "MT READY"
|
||||
|
||||
fetch: $(TESTER)
|
||||
./$^ fetch
|
||||
|
||||
build:
|
||||
go build -a -o $(TESTER)
|
||||
|
||||
$(TESTER): tester.go
|
||||
$(MAKE) build
|
||||
|
||||
clean:
|
||||
rm -f test-*.js
|
||||
rm -f $(TESTER)
|
||||
|
||||
err: $(TESTER)
|
||||
for test in test-*.js; do ./$^ $$test; done 2>$@
|
|
@ -1,196 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/robertkrimen/otto"
|
||||
"github.com/robertkrimen/otto/parser"
|
||||
)
|
||||
|
||||
var flag_test *bool = flag.Bool("test", false, "")
|
||||
var flag_report *bool = flag.Bool("report", false, "")
|
||||
|
||||
var match_ReferenceError_not_defined = regexp.MustCompile(`^ReferenceError: \S+ is not defined$`)
|
||||
var match_lookahead = regexp.MustCompile(`Invalid regular expression: re2: Invalid \(\?[=!]\) <lookahead>`)
|
||||
var match_backreference = regexp.MustCompile(`Invalid regular expression: re2: Invalid \\\d <backreference>`)
|
||||
var match_TypeError_undefined = regexp.MustCompile(`^TypeError: Cannot access member '[^']+' of undefined$`)
|
||||
|
||||
var target = map[string]string{
|
||||
"test-angular-bindonce.js": "fail", // (anonymous): Line 1:944 Unexpected token ( (and 40 more errors)
|
||||
"test-jsforce.js": "fail", // (anonymous): Line 9:28329 RuneError (and 5 more errors)
|
||||
"test-chaplin.js": "parse", // Error: Chaplin requires Common.js or AMD modules
|
||||
"test-dropbox.js.js": "parse", // Error: dropbox.js loaded in an unsupported JavaScript environment.
|
||||
"test-epitome.js": "parse", // TypeError: undefined is not a function
|
||||
"test-portal.js": "parse", // TypeError
|
||||
"test-reactive-coffee.js": "parse", // Dependencies are not met for reactive: _ and $ not found
|
||||
"test-scriptaculous.js": "parse", // script.aculo.us requires the Prototype JavaScript framework >= 1.6.0.3
|
||||
"test-waypoints.js": "parse", // TypeError: undefined is not a function
|
||||
"test-webuploader.js": "parse", // Error: `jQuery` is undefined
|
||||
"test-xuijs.js": "parse", // TypeError: undefined is not a function
|
||||
}
|
||||
|
||||
// http://cdnjs.com/
|
||||
// http://api.cdnjs.com/libraries
|
||||
|
||||
func fetch(name, location string) error {
|
||||
response, err := http.Get(location)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(location, ".js") {
|
||||
return nil
|
||||
}
|
||||
|
||||
filename := "test-" + name + ".js"
|
||||
fmt.Println(filename, len(body))
|
||||
return ioutil.WriteFile(filename, body, 0644)
|
||||
}
|
||||
|
||||
func test(filename string) error {
|
||||
script, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !*flag_report {
|
||||
fmt.Fprintln(os.Stdout, filename, len(script))
|
||||
}
|
||||
|
||||
parse := false
|
||||
option := target[filename]
|
||||
|
||||
if option != "parse" {
|
||||
vm := otto.New()
|
||||
_, err = vm.Run(string(script))
|
||||
if err != nil {
|
||||
value := err.Error()
|
||||
switch {
|
||||
case match_ReferenceError_not_defined.MatchString(value):
|
||||
case match_TypeError_undefined.MatchString(value):
|
||||
case match_lookahead.MatchString(value):
|
||||
case match_backreference.MatchString(value):
|
||||
default:
|
||||
return err
|
||||
}
|
||||
parse = true
|
||||
}
|
||||
}
|
||||
|
||||
if parse {
|
||||
_, err = parser.ParseFile(nil, filename, string(script), parser.IgnoreRegExpErrors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target[filename] = "parse"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
filename := ""
|
||||
|
||||
err := func() error {
|
||||
|
||||
if flag.Arg(0) == "fetch" {
|
||||
response, err := http.Get("http://api.cdnjs.com/libraries")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var tmp map[string]interface{}
|
||||
|
||||
err = json.Unmarshal(body, &tmp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, value := range tmp["results"].([]interface{}) {
|
||||
wg.Add(1)
|
||||
library := value.(map[string]interface{})
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
fetch(library["name"].(string), library["latest"].(string))
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if *flag_report {
|
||||
files, err := ioutil.ReadDir(".")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\t', 0)
|
||||
fmt.Fprintln(writer, "", "\t| Status")
|
||||
fmt.Fprintln(writer, "---", "\t| ---")
|
||||
for _, file := range files {
|
||||
filename := file.Name()
|
||||
if !strings.HasPrefix(filename, "test-") {
|
||||
continue
|
||||
}
|
||||
err := test(filename)
|
||||
option := target[filename]
|
||||
name := strings.TrimPrefix(strings.TrimSuffix(filename, ".js"), "test-")
|
||||
if err == nil {
|
||||
switch option {
|
||||
case "":
|
||||
fmt.Fprintln(writer, name, "\t| pass")
|
||||
case "parse":
|
||||
fmt.Fprintln(writer, name, "\t| pass (parse)")
|
||||
case "re2":
|
||||
continue
|
||||
fmt.Fprintln(writer, name, "\t| unknown (re2)")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintln(writer, name, "\t| fail")
|
||||
}
|
||||
}
|
||||
writer.Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
filename = flag.Arg(0)
|
||||
return test(filename)
|
||||
|
||||
}()
|
||||
if err != nil {
|
||||
if filename != "" {
|
||||
if *flag_test && target[filename] == "fail" {
|
||||
goto exit
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", filename, err.Error())
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
os.Exit(64)
|
||||
}
|
||||
exit:
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
package otto
|
||||
|
||||
func (rt *_runtime) newErrorObject(name string, message Value) *_object {
|
||||
self := rt.newClassObject("Error")
|
||||
if message.IsDefined() {
|
||||
msg := message.string()
|
||||
self.defineProperty("message", toValue_string(msg), 0111, false)
|
||||
self.value = newError(rt, name, msg)
|
||||
} else {
|
||||
self.value = newError(rt, name)
|
||||
}
|
||||
return self
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
.PHONY: source
|
||||
|
||||
source: source.go
|
||||
|
||||
underscore.js:
|
||||
curl -kL http://underscorejs.org/underscore.js > $@
|
||||
|
||||
source.go: underscore.js
|
||||
go-bindata -f underscore -p underscore -u true < $< 2>/dev/null | grep -v '^//' | gofmt > $@
|
||||
head -4 $< >> $@
|
||||
mv $< ..
|
|
@ -1,53 +0,0 @@
|
|||
# underscore
|
||||
--
|
||||
import "github.com/robertkrimen/otto/underscore"
|
||||
|
||||
Package underscore contains the source for the JavaScript utility-belt library.
|
||||
|
||||
import (
|
||||
_ "github.com/robertkrimen/otto/underscore"
|
||||
)
|
||||
// Every Otto runtime will now include underscore
|
||||
|
||||
http://underscorejs.org
|
||||
|
||||
https://github.com/documentcloud/underscore
|
||||
|
||||
By importing this package, you'll automatically load underscore every time you
|
||||
create a new Otto runtime.
|
||||
|
||||
To prevent this behavior, you can do the following:
|
||||
|
||||
import (
|
||||
"github.com/robertkrimen/otto/underscore"
|
||||
)
|
||||
|
||||
func init() {
|
||||
underscore.Disable()
|
||||
}
|
||||
|
||||
## Usage
|
||||
|
||||
#### func Disable
|
||||
|
||||
```go
|
||||
func Disable()
|
||||
```
|
||||
Disable underscore runtime inclusion.
|
||||
|
||||
#### func Enable
|
||||
|
||||
```go
|
||||
func Enable()
|
||||
```
|
||||
Enable underscore runtime inclusion.
|
||||
|
||||
#### func Source
|
||||
|
||||
```go
|
||||
func Source() string
|
||||
```
|
||||
Source returns the underscore source.
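If automatic inclusion has been disabled, the same source can still be loaded into individual runtimes by hand. A sketch (error handling kept minimal):

```go
import (
	"github.com/robertkrimen/otto"
	"github.com/robertkrimen/otto/underscore"
)

func init() {
	underscore.Disable() // keep new runtimes clean by default
}

func newRuntimeWithUnderscore() *otto.Otto {
	vm := otto.New()
	if _, err := vm.Run(underscore.Source()); err != nil {
		panic(err)
	}
	return vm // underscore's _ is available in this runtime only
}
```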
|
||||
|
||||
--
|
||||
**godocdown** http://github.com/robertkrimen/godocdown
|
File diff suppressed because it is too large
|
@ -1,84 +0,0 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
my $underscore_test = shift @ARGV || "";
|
||||
if (!-d $underscore_test) {
|
||||
print <<_END_;
|
||||
Usage:
|
||||
|
||||
testify ./underscore/test
|
||||
|
||||
# Should look something like:
|
||||
arrays.js
|
||||
chaining.js
|
||||
collections.js
|
||||
functions.js
|
||||
index.html
|
||||
objects.js
|
||||
speed.js
|
||||
utility.js
|
||||
vendor
|
||||
|
||||
_END_
|
||||
if ($underscore_test) {
|
||||
die "!: Not a directory: $underscore_test\n"
|
||||
}
|
||||
exit;
|
||||
}
|
||||
|
||||
chdir $underscore_test or die "!: $!";
|
||||
|
||||
my @js = <*.js>;
|
||||
|
||||
for my $file (@js) {
|
||||
open my $fh, '<', $file or die "!: $!";
|
||||
my $tests = join "", <$fh>;
|
||||
my @tests = $tests =~ m/
|
||||
^(\s{2}test\(.*?
|
||||
^\s{2}}\);)$
|
||||
/mgxs;
|
||||
close $fh;
|
||||
next unless @tests;
|
||||
print "$file: ", scalar(@tests), "\n";
|
||||
my $underscore_name = "underscore_$file";
|
||||
$underscore_name =~ s/.js$//;
|
||||
my $go_file = "${underscore_name}_test.go";
|
||||
$go_file =~ s/.js$/.go/;
|
||||
open $fh, '>', $go_file or die "!: $!";
|
||||
|
||||
$fh->print(<<_END_);
|
||||
package otto
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
_END_
|
||||
|
||||
my $count = 0;
|
||||
for my $test (@tests) {
|
||||
$test =~ s/`([^`]+)`/<$1>/g;
|
||||
my ($name) = $test =~ m/^\s*test\(['"]([^'"]+)['"]/;
|
||||
$fh->print(<<_END_);
|
||||
// $name
|
||||
func Test_${underscore_name}_$count(t *testing.T) {
|
||||
tt(t, func(){
|
||||
test := underscoreTest()
|
||||
|
||||
test(`
|
||||
$test
|
||||
`)
|
||||
})
|
||||
}
|
||||
|
||||
_END_
|
||||
$count++;
|
||||
}
|
||||
}
|
||||
|
||||
# test('#779 - delimeters are applied to unescaped text.', 1, function() {
|
||||
# var template = _.template('<<\nx\n>>', null, {evaluate: /<<(.*?)>>/g});
|
||||
# strictEqual(template(), '<<\nx\n>>');
|
||||
# });
|
|
@ -1,49 +0,0 @@
|
|||
/*
|
||||
Package underscore contains the source for the JavaScript utility-belt library.
|
||||
|
||||
import (
|
||||
_ "github.com/robertkrimen/otto/underscore"
|
||||
)
|
||||
// Every Otto runtime will now include underscore
|
||||
|
||||
http://underscorejs.org
|
||||
|
||||
https://github.com/documentcloud/underscore
|
||||
|
||||
By importing this package, you'll automatically load underscore every time you create a new Otto runtime.
|
||||
|
||||
To prevent this behavior, you can do the following:
|
||||
|
||||
import (
|
||||
"github.com/robertkrimen/otto/underscore"
|
||||
)
|
||||
|
||||
func init() {
|
||||
underscore.Disable()
|
||||
}
|
||||
|
||||
*/
|
||||
package underscore
|
||||
|
||||
import (
|
||||
"github.com/robertkrimen/otto/registry"
|
||||
)
|
||||
|
||||
var entry *registry.Entry = registry.Register(func() string {
|
||||
return Source()
|
||||
})
|
||||
|
||||
// Enable underscore runtime inclusion.
|
||||
func Enable() {
|
||||
entry.Enable()
|
||||
}
|
||||
|
||||
// Disable underscore runtime inclusion.
|
||||
func Disable() {
|
||||
entry.Disable()
|
||||
}
|
||||
|
||||
// Source returns the underscore source.
|
||||
func Source() string {
|
||||
return string(underscore())
|
||||
}
|
|
@ -1,308 +0,0 @@
|
|||
/*
|
||||
Package cors is net/http handler to handle CORS related requests
|
||||
as defined by http://www.w3.org/TR/cors/
|
||||
|
||||
You can configure it by passing an option struct to cors.New:
|
||||
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"foo.com"},
|
||||
AllowedMethods: []string{"GET", "POST", "DELETE"},
|
||||
AllowCredentials: true,
|
||||
})
|
||||
|
||||
Then insert the handler in the chain:
|
||||
|
||||
handler = c.Handler(handler)
|
||||
|
||||
See Options documentation for more options.
|
||||
|
||||
The resulting handler is a standard net/http handler.
|
||||
*/
|
||||
package cors
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Options is a configuration container to set up the CORS middleware.
|
||||
type Options struct {
|
||||
// AllowedOrigins is a list of origins a cross-domain request can be executed from.
|
||||
// If the special "*" value is present in the list, all origins will be allowed.
|
||||
// Default value is ["*"]
|
||||
AllowedOrigins []string
|
||||
// AllowedMethods is a list of methods the client is allowed to use with
|
||||
// cross-domain requests. Default value is simple methods (GET and POST)
|
||||
AllowedMethods []string
|
||||
// AllowedHeaders is a list of non-simple headers the client is allowed to use with
|
||||
// cross-domain requests.
|
||||
// If the special "*" value is present in the list, all headers will be allowed.
|
||||
// Default value is [] but "Origin" is always appended to the list.
|
||||
AllowedHeaders []string
|
||||
// ExposedHeaders indicates which headers are safe to expose to the API of a CORS
|
||||
// API specification (i.e. headers that client-side code is allowed to read from responses).
|
||||
ExposedHeaders []string
|
||||
// AllowCredentials indicates whether the request can include user credentials like
|
||||
// cookies, HTTP authentication or client side SSL certificates.
|
||||
AllowCredentials bool
|
||||
// MaxAge indicates how long (in seconds) the results of a preflight request
|
||||
// can be cached
|
||||
MaxAge int
|
||||
// Debugging flag adds additional output to debug server side CORS issues
|
||||
Debug bool
|
||||
// log object to use when debugging
|
||||
log *log.Logger
|
||||
}
|
||||
|
||||
type Cors struct {
|
||||
// The CORS Options
|
||||
options Options
|
||||
}
|
||||
|
||||
// New creates a new Cors handler with the provided options.
|
||||
func New(options Options) *Cors {
|
||||
// Normalize options
|
||||
// Note: for origins and methods matching, the spec requires a case-sensitive matching.
|
||||
// As it may be error prone, we chose to ignore the spec here.
|
||||
normOptions := Options{
|
||||
AllowedOrigins: convert(options.AllowedOrigins, strings.ToLower),
|
||||
AllowedMethods: convert(options.AllowedMethods, strings.ToUpper),
|
||||
// Origin is always appended as some browsers will always request
|
||||
// for this header at preflight
|
||||
AllowedHeaders: convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey),
|
||||
ExposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey),
|
||||
AllowCredentials: options.AllowCredentials,
|
||||
MaxAge: options.MaxAge,
|
||||
Debug: options.Debug,
|
||||
log: log.New(os.Stdout, "[cors] ", log.LstdFlags),
|
||||
}
|
||||
if len(normOptions.AllowedOrigins) == 0 {
|
||||
// Default is all origins
|
||||
normOptions.AllowedOrigins = []string{"*"}
|
||||
}
|
||||
if len(normOptions.AllowedHeaders) == 1 {
|
||||
// Add some sensible defaults
|
||||
normOptions.AllowedHeaders = []string{"Origin", "Accept", "Content-Type"}
|
||||
}
|
||||
if len(normOptions.AllowedMethods) == 0 {
|
||||
// Default is simple methods
|
||||
normOptions.AllowedMethods = []string{"GET", "POST"}
|
||||
}
|
||||
|
||||
if normOptions.Debug {
|
||||
normOptions.log.Printf("Options: %v", normOptions)
|
||||
}
|
||||
return &Cors{
|
||||
options: normOptions,
|
||||
}
|
||||
}
|
||||
|
||||
// Default creates a new Cors handler with default options
|
||||
func Default() *Cors {
|
||||
return New(Options{})
|
||||
}
|
||||
|
||||
// Handler applies the CORS specification to the request, and adds relevant CORS headers
|
||||
// as necessary.
|
||||
func (cors *Cors) Handler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "OPTIONS" {
|
||||
cors.logf("Handler: Preflight request")
|
||||
cors.handlePreflight(w, r)
|
||||
// Preflight requests are standalone and should stop the chain as some other
|
||||
// middleware may not handle OPTIONS requests correctly. One typical example
|
||||
// is authentication middleware; OPTIONS requests won't carry authentication
|
||||
// headers (see #1)
|
||||
} else {
|
||||
cors.logf("Handler: Actual request")
|
||||
cors.handleActualRequest(w, r)
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
})
|
||||
}
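As a usage sketch of the Handler method above, wired into a plain net/http server (the import path, route and port are illustrative only):

package main

import (
	"log"
	"net/http"

	"github.com/rs/cors" // assumed import path for this package
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"pong": true}`))
	})

	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"https://example.com"},
		AllowedMethods:   []string{"GET", "POST", "DELETE"},
		AllowedHeaders:   []string{"Content-Type"},
		AllowCredentials: true,
		MaxAge:           300, // cache preflight results for five minutes
	})

	// Every request now passes through the CORS handler before reaching the mux;
	// preflight OPTIONS requests are answered by the handler and never reach it.
	log.Fatal(http.ListenAndServe(":8080", c.Handler(mux)))
}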
|
||||
|
||||
// Martini compatible handler
|
||||
func (cors *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "OPTIONS" {
|
||||
cors.logf("HandlerFunc: Preflight request")
|
||||
cors.handlePreflight(w, r)
|
||||
} else {
|
||||
cors.logf("HandlerFunc: Actual request")
|
||||
cors.handleActualRequest(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// Negroni compatible interface
|
||||
func (cors *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
|
||||
if r.Method == "OPTIONS" {
|
||||
cors.logf("ServeHTTP: Preflight request")
|
||||
cors.handlePreflight(w, r)
|
||||
// Preflight requests are standalone and should stop the chain as some other
|
||||
// middleware may not handle OPTIONS requests correctly. One typical example
|
||||
// is authentication middleware; OPTIONS requests won't carry authentication
|
||||
// headers (see #1)
|
||||
} else {
|
||||
cors.logf("ServeHTTP: Actual request")
|
||||
cors.handleActualRequest(w, r)
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// handlePreflight handles pre-flight CORS requests
|
||||
func (cors *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) {
|
||||
options := cors.options
|
||||
headers := w.Header()
|
||||
origin := r.Header.Get("Origin")
|
||||
|
||||
if r.Method != "OPTIONS" {
|
||||
cors.logf(" Preflight aborted: %s!=OPTIONS", r.Method)
|
||||
return
|
||||
}
|
||||
if origin == "" {
|
||||
cors.logf(" Preflight aborted: empty origin")
|
||||
return
|
||||
}
|
||||
if !cors.isOriginAllowed(origin) {
|
||||
cors.logf(" Preflight aborted: origin '%s' not allowed", origin)
|
||||
return
|
||||
}
|
||||
|
||||
reqMethod := r.Header.Get("Access-Control-Request-Method")
|
||||
if !cors.isMethodAllowed(reqMethod) {
|
||||
cors.logf(" Preflight aborted: method '%s' not allowed", reqMethod)
|
||||
return
|
||||
}
|
||||
reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers"))
|
||||
if !cors.areHeadersAllowed(reqHeaders) {
|
||||
cors.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders)
|
||||
return
|
||||
}
|
||||
headers.Set("Access-Control-Allow-Origin", origin)
|
||||
headers.Add("Vary", "Origin")
|
||||
// Spec says: Since the list of methods can be unbounded, simply returning the method indicated
|
||||
// by Access-Control-Request-Method (if supported) can be enough
|
||||
headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod))
|
||||
if len(reqHeaders) > 0 {
|
||||
|
||||
// Spec says: Since the list of headers can be unbounded, simply returning supported headers
|
||||
// from Access-Control-Request-Headers can be enough
|
||||
headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", "))
|
||||
}
|
||||
if options.AllowCredentials {
|
||||
headers.Set("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
if options.MaxAge > 0 {
|
||||
headers.Set("Access-Control-Max-Age", strconv.Itoa(options.MaxAge))
|
||||
}
|
||||
cors.logf(" Preflight response headers: %v", headers)
|
||||
}
|
||||
|
||||
// handleActualRequest handles simple cross-origin requests, actual requests or redirects
|
||||
func (cors *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) {
|
||||
options := cors.options
|
||||
headers := w.Header()
|
||||
origin := r.Header.Get("Origin")
|
||||
|
||||
if r.Method == "OPTIONS" {
|
||||
cors.logf(" Actual request no headers added: method == %s", r.Method)
|
||||
return
|
||||
}
|
||||
if origin == "" {
|
||||
cors.logf(" Actual request no headers added: missing origin")
|
||||
return
|
||||
}
|
||||
if !cors.isOriginAllowed(origin) {
|
||||
cors.logf(" Actual request no headers added: origin '%s' not allowed", origin)
|
||||
return
|
||||
}
|
||||
|
||||
// Note that the spec does not define a way to specifically disallow a simple method like GET or
|
||||
// POST. Access-Control-Allow-Methods is only used for pre-flight requests and the
|
||||
// spec doesn't instruct to check the allowed methods for simple cross-origin requests.
|
||||
// We think it's a nice feature to be able to have control on those methods though.
|
||||
if !cors.isMethodAllowed(r.Method) {
|
||||
if cors.options.Debug {
|
||||
cors.logf(" Actual request no headers added: method '%s' not allowed",
|
||||
r.Method)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
headers.Set("Access-Control-Allow-Origin", origin)
|
||||
headers.Add("Vary", "Origin")
|
||||
if len(options.ExposedHeaders) > 0 {
|
||||
headers.Set("Access-Control-Expose-Headers", strings.Join(options.ExposedHeaders, ", "))
|
||||
}
|
||||
if options.AllowCredentials {
|
||||
headers.Set("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
cors.logf(" Actual response added headers: %v", headers)
|
||||
}
|
||||
|
||||
// logf is a convenience helper that checks whether debugging is turned on before printing
|
||||
func (cors *Cors) logf(format string, a ...interface{}) {
|
||||
if cors.options.Debug {
|
||||
cors.options.log.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
|
||||
// on the endpoint
|
||||
func (cors *Cors) isOriginAllowed(origin string) bool {
|
||||
allowedOrigins := cors.options.AllowedOrigins
|
||||
origin = strings.ToLower(origin)
|
||||
for _, allowedOrigin := range allowedOrigins {
|
||||
switch allowedOrigin {
|
||||
case "*":
|
||||
return true
|
||||
case origin:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isMethodAllowed checks if a given method can be used as part of a cross-domain request
|
||||
// on the endpoint.
|
||||
func (cors *Cors) isMethodAllowed(method string) bool {
|
||||
allowedMethods := cors.options.AllowedMethods
|
||||
if len(allowedMethods) == 0 {
|
||||
// If no method is allowed, always return false, even for preflight requests
|
||||
return false
|
||||
}
|
||||
method = strings.ToUpper(method)
|
||||
if method == "OPTIONS" {
|
||||
// Always allow preflight requests
|
||||
return true
|
||||
}
|
||||
for _, allowedMethod := range allowedMethods {
|
||||
if allowedMethod == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// areHeadersAllowed checks if a given list of headers are allowed to used within
|
||||
// a cross-domain request.
|
||||
func (cors *Cors) areHeadersAllowed(requestedHeaders []string) bool {
|
||||
if len(requestedHeaders) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, header := range requestedHeaders {
|
||||
found := false
|
||||
for _, allowedHeader := range cors.options.AllowedHeaders {
|
||||
if allowedHeader == "*" || allowedHeader == header {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package cors
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type converter func(string) string
|
||||
|
||||
// convert converts a list of string using the passed converter function
|
||||
func convert(s []string, c converter) []string {
|
||||
out := []string{}
|
||||
for _, i := range s {
|
||||
out = append(out, c(i))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func parseHeaderList(headerList string) (headers []string) {
|
||||
for _, header := range strings.Split(headerList, ",") {
|
||||
header = http.CanonicalHeaderKey(strings.TrimSpace(header))
|
||||
if header != "" {
|
||||
headers = append(headers, header)
|
||||
}
|
||||
}
|
||||
return headers
|
||||
}
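// To illustrate the behaviour of parseHeaderList (a sketch, not a test from this
// repository): splitting on commas, trimming whitespace, dropping empty entries and
// canonicalising each name means that
//
//	parseHeaderList(" x-custom-1, ,x-custom-2 ")
//
// yields []string{"X-Custom-1", "X-Custom-2"}.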
|
|
@ -1,762 +0,0 @@
|
|||
// This file was generated by go generate; DO NOT EDIT
|
||||
|
||||
package language
|
||||
|
||||
// NumCompactTags is the number of common tags. The maximum tag is
|
||||
// NumCompactTags-1.
|
||||
const NumCompactTags = 747
|
||||
|
||||
var specialTags = []Tag{ // 2 elements
|
||||
0: {lang: 0x61, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
|
||||
1: {lang: 0x9b, region: 0x132, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
|
||||
} // Size: 72 bytes
|
||||
|
||||
var coreTags = map[uint32]uint16{
|
||||
0x0: 0, // und
|
||||
0x00a00000: 3, // af
|
||||
0x00a000d0: 4, // af-NA
|
||||
0x00a0015e: 5, // af-ZA
|
||||
0x00b00000: 6, // agq
|
||||
0x00b00051: 7, // agq-CM
|
||||
0x00d00000: 8, // ak
|
||||
0x00d0007e: 9, // ak-GH
|
||||
0x01100000: 10, // am
|
||||
0x0110006e: 11, // am-ET
|
||||
0x01500000: 12, // ar
|
||||
0x01500001: 13, // ar-001
|
||||
0x01500022: 14, // ar-AE
|
||||
0x01500038: 15, // ar-BH
|
||||
0x01500061: 16, // ar-DJ
|
||||
0x01500066: 17, // ar-DZ
|
||||
0x0150006a: 18, // ar-EG
|
||||
0x0150006b: 19, // ar-EH
|
||||
0x0150006c: 20, // ar-ER
|
||||
0x01500095: 21, // ar-IL
|
||||
0x01500099: 22, // ar-IQ
|
||||
0x0150009f: 23, // ar-JO
|
||||
0x015000a6: 24, // ar-KM
|
||||
0x015000aa: 25, // ar-KW
|
||||
0x015000ae: 26, // ar-LB
|
||||
0x015000b7: 27, // ar-LY
|
||||
0x015000b8: 28, // ar-MA
|
||||
0x015000c7: 29, // ar-MR
|
||||
0x015000df: 30, // ar-OM
|
||||
0x015000eb: 31, // ar-PS
|
||||
0x015000f1: 32, // ar-QA
|
||||
0x01500106: 33, // ar-SA
|
||||
0x01500109: 34, // ar-SD
|
||||
0x01500113: 35, // ar-SO
|
||||
0x01500115: 36, // ar-SS
|
||||
0x0150011a: 37, // ar-SY
|
||||
0x0150011e: 38, // ar-TD
|
||||
0x01500126: 39, // ar-TN
|
||||
0x0150015b: 40, // ar-YE
|
||||
0x01c00000: 41, // as
|
||||
0x01c00097: 42, // as-IN
|
||||
0x01d00000: 43, // asa
|
||||
0x01d0012d: 44, // asa-TZ
|
||||
0x01f00000: 45, // ast
|
||||
0x01f0006d: 46, // ast-ES
|
||||
0x02400000: 47, // az
|
||||
0x0241e000: 48, // az-Cyrl
|
||||
0x0241e031: 49, // az-Cyrl-AZ
|
||||
0x02452000: 50, // az-Latn
|
||||
0x02452031: 51, // az-Latn-AZ
|
||||
0x02a00000: 52, // bas
|
||||
0x02a00051: 53, // bas-CM
|
||||
0x02f00000: 54, // be
|
||||
0x02f00046: 55, // be-BY
|
||||
0x03100000: 56, // bem
|
||||
0x0310015f: 57, // bem-ZM
|
||||
0x03300000: 58, // bez
|
||||
0x0330012d: 59, // bez-TZ
|
||||
0x03800000: 60, // bg
|
||||
0x03800037: 61, // bg-BG
|
||||
0x03c00000: 62, // bh
|
||||
0x04900000: 63, // bm
|
||||
0x049000c1: 64, // bm-ML
|
||||
0x04b00000: 65, // bn
|
||||
0x04b00034: 66, // bn-BD
|
||||
0x04b00097: 67, // bn-IN
|
||||
0x04c00000: 68, // bo
|
||||
0x04c00052: 69, // bo-CN
|
||||
0x04c00097: 70, // bo-IN
|
||||
0x05000000: 71, // br
|
||||
0x05000076: 72, // br-FR
|
||||
0x05300000: 73, // brx
|
||||
0x05300097: 74, // brx-IN
|
||||
0x05400000: 75, // bs
|
||||
0x0541e000: 76, // bs-Cyrl
|
||||
0x0541e032: 77, // bs-Cyrl-BA
|
||||
0x05452000: 78, // bs-Latn
|
||||
0x05452032: 79, // bs-Latn-BA
|
||||
0x06100000: 80, // ca
|
||||
0x06100021: 81, // ca-AD
|
||||
0x0610006d: 82, // ca-ES
|
||||
0x06100076: 83, // ca-FR
|
||||
0x0610009c: 84, // ca-IT
|
||||
0x06400000: 85, // ce
|
||||
0x06400104: 86, // ce-RU
|
||||
0x06600000: 87, // cgg
|
||||
0x0660012f: 88, // cgg-UG
|
||||
0x06c00000: 89, // chr
|
||||
0x06c00132: 90, // chr-US
|
||||
0x06f00000: 91, // ckb
|
||||
0x06f00099: 92, // ckb-IQ
|
||||
0x06f0009a: 93, // ckb-IR
|
||||
0x07900000: 94, // cs
|
||||
0x0790005d: 95, // cs-CZ
|
||||
0x07d00000: 96, // cu
|
||||
0x07d00104: 97, // cu-RU
|
||||
0x07f00000: 98, // cy
|
||||
0x07f00079: 99, // cy-GB
|
||||
0x08000000: 100, // da
|
||||
0x08000062: 101, // da-DK
|
||||
0x08000080: 102, // da-GL
|
||||
0x08300000: 103, // dav
|
||||
0x083000a2: 104, // dav-KE
|
||||
0x08500000: 105, // de
|
||||
0x0850002d: 106, // de-AT
|
||||
0x08500035: 107, // de-BE
|
||||
0x0850004d: 108, // de-CH
|
||||
0x0850005f: 109, // de-DE
|
||||
0x085000b0: 110, // de-LI
|
||||
0x085000b5: 111, // de-LU
|
||||
0x08800000: 112, // dje
|
||||
0x088000d2: 113, // dje-NE
|
||||
0x08b00000: 114, // dsb
|
||||
0x08b0005f: 115, // dsb-DE
|
||||
0x08f00000: 116, // dua
|
||||
0x08f00051: 117, // dua-CM
|
||||
0x09000000: 118, // dv
|
||||
0x09100000: 119, // dyo
|
||||
0x09100112: 120, // dyo-SN
|
||||
0x09300000: 121, // dz
|
||||
0x09300042: 122, // dz-BT
|
||||
0x09400000: 123, // ebu
|
||||
0x094000a2: 124, // ebu-KE
|
||||
0x09500000: 125, // ee
|
||||
0x0950007e: 126, // ee-GH
|
||||
0x09500120: 127, // ee-TG
|
||||
0x09a00000: 128, // el
|
||||
0x09a0005c: 129, // el-CY
|
||||
0x09a00085: 130, // el-GR
|
||||
0x09b00000: 131, // en
|
||||
0x09b00001: 132, // en-001
|
||||
0x09b0001a: 133, // en-150
|
||||
0x09b00024: 134, // en-AG
|
||||
0x09b00025: 135, // en-AI
|
||||
0x09b0002c: 136, // en-AS
|
||||
0x09b0002d: 137, // en-AT
|
||||
0x09b0002e: 138, // en-AU
|
||||
0x09b00033: 139, // en-BB
|
||||
0x09b00035: 140, // en-BE
|
||||
0x09b00039: 141, // en-BI
|
||||
0x09b0003c: 142, // en-BM
|
||||
0x09b00041: 143, // en-BS
|
||||
0x09b00045: 144, // en-BW
|
||||
0x09b00047: 145, // en-BZ
|
||||
0x09b00048: 146, // en-CA
|
||||
0x09b00049: 147, // en-CC
|
||||
0x09b0004d: 148, // en-CH
|
||||
0x09b0004f: 149, // en-CK
|
||||
0x09b00051: 150, // en-CM
|
||||
0x09b0005b: 151, // en-CX
|
||||
0x09b0005c: 152, // en-CY
|
||||
0x09b0005f: 153, // en-DE
|
||||
0x09b00060: 154, // en-DG
|
||||
0x09b00062: 155, // en-DK
|
||||
0x09b00063: 156, // en-DM
|
||||
0x09b0006c: 157, // en-ER
|
||||
0x09b00070: 158, // en-FI
|
||||
0x09b00071: 159, // en-FJ
|
||||
0x09b00072: 160, // en-FK
|
||||
0x09b00073: 161, // en-FM
|
||||
0x09b00079: 162, // en-GB
|
||||
0x09b0007a: 163, // en-GD
|
||||
0x09b0007d: 164, // en-GG
|
||||
0x09b0007e: 165, // en-GH
|
||||
0x09b0007f: 166, // en-GI
|
||||
0x09b00081: 167, // en-GM
|
||||
0x09b00088: 168, // en-GU
|
||||
0x09b0008a: 169, // en-GY
|
||||
0x09b0008b: 170, // en-HK
|
||||
0x09b00094: 171, // en-IE
|
||||
0x09b00095: 172, // en-IL
|
||||
0x09b00096: 173, // en-IM
|
||||
0x09b00097: 174, // en-IN
|
||||
0x09b00098: 175, // en-IO
|
||||
0x09b0009d: 176, // en-JE
|
||||
0x09b0009e: 177, // en-JM
|
||||
0x09b000a2: 178, // en-KE
|
||||
0x09b000a5: 179, // en-KI
|
||||
0x09b000a7: 180, // en-KN
|
||||
0x09b000ab: 181, // en-KY
|
||||
0x09b000af: 182, // en-LC
|
||||
0x09b000b2: 183, // en-LR
|
||||
0x09b000b3: 184, // en-LS
|
||||
0x09b000bd: 185, // en-MG
|
||||
0x09b000be: 186, // en-MH
|
||||
0x09b000c4: 187, // en-MO
|
||||
0x09b000c5: 188, // en-MP
|
||||
0x09b000c8: 189, // en-MS
|
||||
0x09b000c9: 190, // en-MT
|
||||
0x09b000ca: 191, // en-MU
|
||||
0x09b000cc: 192, // en-MW
|
||||
0x09b000ce: 193, // en-MY
|
||||
0x09b000d0: 194, // en-NA
|
||||
0x09b000d3: 195, // en-NF
|
||||
0x09b000d4: 196, // en-NG
|
||||
0x09b000d7: 197, // en-NL
|
||||
0x09b000db: 198, // en-NR
|
||||
0x09b000dd: 199, // en-NU
|
||||
0x09b000de: 200, // en-NZ
|
||||
0x09b000e4: 201, // en-PG
|
||||
0x09b000e5: 202, // en-PH
|
||||
0x09b000e6: 203, // en-PK
|
||||
0x09b000e9: 204, // en-PN
|
||||
0x09b000ea: 205, // en-PR
|
||||
0x09b000ee: 206, // en-PW
|
||||
0x09b00105: 207, // en-RW
|
||||
0x09b00107: 208, // en-SB
|
||||
0x09b00108: 209, // en-SC
|
||||
0x09b00109: 210, // en-SD
|
||||
0x09b0010a: 211, // en-SE
|
||||
0x09b0010b: 212, // en-SG
|
||||
0x09b0010c: 213, // en-SH
|
||||
0x09b0010d: 214, // en-SI
|
||||
0x09b00110: 215, // en-SL
|
||||
0x09b00115: 216, // en-SS
|
||||
0x09b00119: 217, // en-SX
|
||||
0x09b0011b: 218, // en-SZ
|
||||
0x09b0011d: 219, // en-TC
|
||||
0x09b00123: 220, // en-TK
|
||||
0x09b00127: 221, // en-TO
|
||||
0x09b0012a: 222, // en-TT
|
||||
0x09b0012b: 223, // en-TV
|
||||
0x09b0012d: 224, // en-TZ
|
||||
0x09b0012f: 225, // en-UG
|
||||
0x09b00131: 226, // en-UM
|
||||
0x09b00132: 227, // en-US
|
||||
0x09b00136: 228, // en-VC
|
||||
0x09b00139: 229, // en-VG
|
||||
0x09b0013a: 230, // en-VI
|
||||
0x09b0013c: 231, // en-VU
|
||||
0x09b0013f: 232, // en-WS
|
||||
0x09b0015e: 233, // en-ZA
|
||||
0x09b0015f: 234, // en-ZM
|
||||
0x09b00161: 235, // en-ZW
|
||||
0x09c00000: 236, // eo
|
||||
0x09c00001: 237, // eo-001
|
||||
0x09d00000: 238, // es
|
||||
0x09d0001e: 239, // es-419
|
||||
0x09d0002b: 240, // es-AR
|
||||
0x09d0003e: 241, // es-BO
|
||||
0x09d00040: 242, // es-BR
|
||||
0x09d00050: 243, // es-CL
|
||||
0x09d00053: 244, // es-CO
|
||||
0x09d00055: 245, // es-CR
|
||||
0x09d00058: 246, // es-CU
|
||||
0x09d00064: 247, // es-DO
|
||||
0x09d00067: 248, // es-EA
|
||||
0x09d00068: 249, // es-EC
|
||||
0x09d0006d: 250, // es-ES
|
||||
0x09d00084: 251, // es-GQ
|
||||
0x09d00087: 252, // es-GT
|
||||
0x09d0008d: 253, // es-HN
|
||||
0x09d00092: 254, // es-IC
|
||||
0x09d000cd: 255, // es-MX
|
||||
0x09d000d6: 256, // es-NI
|
||||
0x09d000e0: 257, // es-PA
|
||||
0x09d000e2: 258, // es-PE
|
||||
0x09d000e5: 259, // es-PH
|
||||
0x09d000ea: 260, // es-PR
|
||||
0x09d000ef: 261, // es-PY
|
||||
0x09d00118: 262, // es-SV
|
||||
0x09d00132: 263, // es-US
|
||||
0x09d00133: 264, // es-UY
|
||||
0x09d00138: 265, // es-VE
|
||||
0x09f00000: 266, // et
|
||||
0x09f00069: 267, // et-EE
|
||||
0x0a100000: 268, // eu
|
||||
0x0a10006d: 269, // eu-ES
|
||||
0x0a200000: 270, // ewo
|
||||
0x0a200051: 271, // ewo-CM
|
||||
0x0a400000: 272, // fa
|
||||
0x0a400023: 273, // fa-AF
|
||||
0x0a40009a: 274, // fa-IR
|
||||
0x0a600000: 275, // ff
|
||||
0x0a600051: 276, // ff-CM
|
||||
0x0a600082: 277, // ff-GN
|
||||
0x0a6000c7: 278, // ff-MR
|
||||
0x0a600112: 279, // ff-SN
|
||||
0x0a800000: 280, // fi
|
||||
0x0a800070: 281, // fi-FI
|
||||
0x0aa00000: 282, // fil
|
||||
0x0aa000e5: 283, // fil-PH
|
||||
0x0ad00000: 284, // fo
|
||||
0x0ad00062: 285, // fo-DK
|
||||
0x0ad00074: 286, // fo-FO
|
||||
0x0af00000: 287, // fr
|
||||
0x0af00035: 288, // fr-BE
|
||||
0x0af00036: 289, // fr-BF
|
||||
0x0af00039: 290, // fr-BI
|
||||
0x0af0003a: 291, // fr-BJ
|
||||
0x0af0003b: 292, // fr-BL
|
||||
0x0af00048: 293, // fr-CA
|
||||
0x0af0004a: 294, // fr-CD
|
||||
0x0af0004b: 295, // fr-CF
|
||||
0x0af0004c: 296, // fr-CG
|
||||
0x0af0004d: 297, // fr-CH
|
||||
0x0af0004e: 298, // fr-CI
|
||||
0x0af00051: 299, // fr-CM
|
||||
0x0af00061: 300, // fr-DJ
|
||||
0x0af00066: 301, // fr-DZ
|
||||
0x0af00076: 302, // fr-FR
|
||||
0x0af00078: 303, // fr-GA
|
||||
0x0af0007c: 304, // fr-GF
|
||||
0x0af00082: 305, // fr-GN
|
||||
0x0af00083: 306, // fr-GP
|
||||
0x0af00084: 307, // fr-GQ
|
||||
0x0af0008f: 308, // fr-HT
|
||||
0x0af000a6: 309, // fr-KM
|
||||
0x0af000b5: 310, // fr-LU
|
||||
0x0af000b8: 311, // fr-MA
|
||||
0x0af000b9: 312, // fr-MC
|
||||
0x0af000bc: 313, // fr-MF
|
||||
0x0af000bd: 314, // fr-MG
|
||||
0x0af000c1: 315, // fr-ML
|
||||
0x0af000c6: 316, // fr-MQ
|
||||
0x0af000c7: 317, // fr-MR
|
||||
0x0af000ca: 318, // fr-MU
|
||||
0x0af000d1: 319, // fr-NC
|
||||
0x0af000d2: 320, // fr-NE
|
||||
0x0af000e3: 321, // fr-PF
|
||||
0x0af000e8: 322, // fr-PM
|
||||
0x0af00100: 323, // fr-RE
|
||||
0x0af00105: 324, // fr-RW
|
||||
0x0af00108: 325, // fr-SC
|
||||
0x0af00112: 326, // fr-SN
|
||||
0x0af0011a: 327, // fr-SY
|
||||
0x0af0011e: 328, // fr-TD
|
||||
0x0af00120: 329, // fr-TG
|
||||
0x0af00126: 330, // fr-TN
|
||||
0x0af0013c: 331, // fr-VU
|
||||
0x0af0013d: 332, // fr-WF
|
||||
0x0af0015c: 333, // fr-YT
|
||||
0x0b600000: 334, // fur
|
||||
0x0b60009c: 335, // fur-IT
|
||||
0x0b900000: 336, // fy
|
||||
0x0b9000d7: 337, // fy-NL
|
||||
0x0ba00000: 338, // ga
|
||||
0x0ba00094: 339, // ga-IE
|
||||
0x0c200000: 340, // gd
|
||||
0x0c200079: 341, // gd-GB
|
||||
0x0c800000: 342, // gl
|
||||
0x0c80006d: 343, // gl-ES
|
||||
0x0d200000: 344, // gsw
|
||||
0x0d20004d: 345, // gsw-CH
|
||||
0x0d200076: 346, // gsw-FR
|
||||
0x0d2000b0: 347, // gsw-LI
|
||||
0x0d300000: 348, // gu
|
||||
0x0d300097: 349, // gu-IN
|
||||
0x0d700000: 350, // guw
|
||||
0x0d800000: 351, // guz
|
||||
0x0d8000a2: 352, // guz-KE
|
||||
0x0d900000: 353, // gv
|
||||
0x0d900096: 354, // gv-IM
|
||||
0x0dc00000: 355, // ha
|
||||
0x0dc0007e: 356, // ha-GH
|
||||
0x0dc000d2: 357, // ha-NE
|
||||
0x0dc000d4: 358, // ha-NG
|
||||
0x0de00000: 359, // haw
|
||||
0x0de00132: 360, // haw-US
|
||||
0x0e000000: 361, // he
|
||||
0x0e000095: 362, // he-IL
|
||||
0x0e100000: 363, // hi
|
||||
0x0e100097: 364, // hi-IN
|
||||
0x0ee00000: 365, // hr
|
||||
0x0ee00032: 366, // hr-BA
|
||||
0x0ee0008e: 367, // hr-HR
|
||||
0x0ef00000: 368, // hsb
|
||||
0x0ef0005f: 369, // hsb-DE
|
||||
0x0f200000: 370, // hu
|
||||
0x0f200090: 371, // hu-HU
|
||||
0x0f300000: 372, // hy
|
||||
0x0f300027: 373, // hy-AM
|
||||
0x0f800000: 374, // id
|
||||
0x0f800093: 375, // id-ID
|
||||
0x0fa00000: 376, // ig
|
||||
0x0fa000d4: 377, // ig-NG
|
||||
0x0fb00000: 378, // ii
|
||||
0x0fb00052: 379, // ii-CN
|
||||
0x10200000: 380, // is
|
||||
0x1020009b: 381, // is-IS
|
||||
0x10300000: 382, // it
|
||||
0x1030004d: 383, // it-CH
|
||||
0x1030009c: 384, // it-IT
|
||||
0x10300111: 385, // it-SM
|
||||
0x10400000: 386, // iu
|
||||
0x10700000: 387, // ja
|
||||
0x107000a0: 388, // ja-JP
|
||||
0x10900000: 389, // jbo
|
||||
0x10a00000: 390, // jgo
|
||||
0x10a00051: 391, // jgo-CM
|
||||
0x10c00000: 392, // jmc
|
||||
0x10c0012d: 393, // jmc-TZ
|
||||
0x10f00000: 394, // jv
|
||||
0x11100000: 395, // ka
|
||||
0x1110007b: 396, // ka-GE
|
||||
0x11300000: 397, // kab
|
||||
0x11300066: 398, // kab-DZ
|
||||
0x11500000: 399, // kaj
|
||||
0x11600000: 400, // kam
|
||||
0x116000a2: 401, // kam-KE
|
||||
0x11900000: 402, // kcg
|
||||
0x11b00000: 403, // kde
|
||||
0x11b0012d: 404, // kde-TZ
|
||||
0x11d00000: 405, // kea
|
||||
0x11d00059: 406, // kea-CV
|
||||
0x12800000: 407, // khq
|
||||
0x128000c1: 408, // khq-ML
|
||||
0x12b00000: 409, // ki
|
||||
0x12b000a2: 410, // ki-KE
|
||||
0x12f00000: 411, // kk
|
||||
0x12f000ac: 412, // kk-KZ
|
||||
0x13000000: 413, // kkj
|
||||
0x13000051: 414, // kkj-CM
|
||||
0x13100000: 415, // kl
|
||||
0x13100080: 416, // kl-GL
|
||||
0x13200000: 417, // kln
|
||||
0x132000a2: 418, // kln-KE
|
||||
0x13300000: 419, // km
|
||||
0x133000a4: 420, // km-KH
|
||||
0x13500000: 421, // kn
|
||||
0x13500097: 422, // kn-IN
|
||||
0x13600000: 423, // ko
|
||||
0x136000a8: 424, // ko-KP
|
||||
0x136000a9: 425, // ko-KR
|
||||
0x13800000: 426, // kok
|
||||
0x13800097: 427, // kok-IN
|
||||
0x14100000: 428, // ks
|
||||
0x14100097: 429, // ks-IN
|
||||
0x14200000: 430, // ksb
|
||||
0x1420012d: 431, // ksb-TZ
|
||||
0x14300000: 432, // ksf
|
||||
0x14300051: 433, // ksf-CM
|
||||
0x14400000: 434, // ksh
|
||||
0x1440005f: 435, // ksh-DE
|
||||
0x14500000: 436, // ku
|
||||
0x14a00000: 437, // kw
|
||||
0x14a00079: 438, // kw-GB
|
||||
0x14d00000: 439, // ky
|
||||
0x14d000a3: 440, // ky-KG
|
||||
0x15100000: 441, // lag
|
||||
0x1510012d: 442, // lag-TZ
|
||||
0x15400000: 443, // lb
|
||||
0x154000b5: 444, // lb-LU
|
||||
0x15a00000: 445, // lg
|
||||
0x15a0012f: 446, // lg-UG
|
||||
0x16100000: 447, // lkt
|
||||
0x16100132: 448, // lkt-US
|
||||
0x16400000: 449, // ln
|
||||
0x16400029: 450, // ln-AO
|
||||
0x1640004a: 451, // ln-CD
|
||||
0x1640004b: 452, // ln-CF
|
||||
0x1640004c: 453, // ln-CG
|
||||
0x16500000: 454, // lo
|
||||
0x165000ad: 455, // lo-LA
|
||||
0x16800000: 456, // lrc
|
||||
0x16800099: 457, // lrc-IQ
|
||||
0x1680009a: 458, // lrc-IR
|
||||
0x16900000: 459, // lt
|
||||
0x169000b4: 460, // lt-LT
|
||||
0x16b00000: 461, // lu
|
||||
0x16b0004a: 462, // lu-CD
|
||||
0x16d00000: 463, // luo
|
||||
0x16d000a2: 464, // luo-KE
|
||||
0x16e00000: 465, // luy
|
||||
0x16e000a2: 466, // luy-KE
|
||||
0x17000000: 467, // lv
|
||||
0x170000b6: 468, // lv-LV
|
||||
0x17a00000: 469, // mas
|
||||
0x17a000a2: 470, // mas-KE
|
||||
0x17a0012d: 471, // mas-TZ
|
||||
0x18000000: 472, // mer
|
||||
0x180000a2: 473, // mer-KE
|
||||
0x18200000: 474, // mfe
|
||||
0x182000ca: 475, // mfe-MU
|
||||
0x18300000: 476, // mg
|
||||
0x183000bd: 477, // mg-MG
|
||||
0x18400000: 478, // mgh
|
||||
0x184000cf: 479, // mgh-MZ
|
||||
0x18500000: 480, // mgo
|
||||
0x18500051: 481, // mgo-CM
|
||||
0x18c00000: 482, // mk
|
||||
0x18c000c0: 483, // mk-MK
|
||||
0x18d00000: 484, // ml
|
||||
0x18d00097: 485, // ml-IN
|
||||
0x18f00000: 486, // mn
|
||||
0x18f000c3: 487, // mn-MN
|
||||
0x19600000: 488, // mr
|
||||
0x19600097: 489, // mr-IN
|
||||
0x19a00000: 490, // ms
|
||||
0x19a0003d: 491, // ms-BN
|
||||
0x19a000ce: 492, // ms-MY
|
||||
0x19a0010b: 493, // ms-SG
|
||||
0x19b00000: 494, // mt
|
||||
0x19b000c9: 495, // mt-MT
|
||||
0x19d00000: 496, // mua
|
||||
0x19d00051: 497, // mua-CM
|
||||
0x1a500000: 498, // my
|
||||
0x1a5000c2: 499, // my-MM
|
||||
0x1a900000: 500, // mzn
|
||||
0x1a90009a: 501, // mzn-IR
|
||||
0x1ab00000: 502, // nah
|
||||
0x1ae00000: 503, // naq
|
||||
0x1ae000d0: 504, // naq-NA
|
||||
0x1af00000: 505, // nb
|
||||
0x1af000d8: 506, // nb-NO
|
||||
0x1af0010e: 507, // nb-SJ
|
||||
0x1b100000: 508, // nd
|
||||
0x1b100161: 509, // nd-ZW
|
||||
0x1b400000: 510, // ne
|
||||
0x1b400097: 511, // ne-IN
|
||||
0x1b4000d9: 512, // ne-NP
|
||||
0x1bd00000: 513, // nl
|
||||
0x1bd0002f: 514, // nl-AW
|
||||
0x1bd00035: 515, // nl-BE
|
||||
0x1bd0003f: 516, // nl-BQ
|
||||
0x1bd0005a: 517, // nl-CW
|
||||
0x1bd000d7: 518, // nl-NL
|
||||
0x1bd00114: 519, // nl-SR
|
||||
0x1bd00119: 520, // nl-SX
|
||||
0x1be00000: 521, // nmg
|
||||
0x1be00051: 522, // nmg-CM
|
||||
0x1bf00000: 523, // nn
|
||||
0x1bf000d8: 524, // nn-NO
|
||||
0x1c000000: 525, // nnh
|
||||
0x1c000051: 526, // nnh-CM
|
||||
0x1c100000: 527, // no
|
||||
0x1c500000: 528, // nqo
|
||||
0x1c600000: 529, // nr
|
||||
0x1c800000: 530, // nso
|
||||
0x1c900000: 531, // nus
|
||||
0x1c900115: 532, // nus-SS
|
||||
0x1cc00000: 533, // ny
|
||||
0x1ce00000: 534, // nyn
|
||||
0x1ce0012f: 535, // nyn-UG
|
||||
0x1d200000: 536, // om
|
||||
0x1d20006e: 537, // om-ET
|
||||
0x1d2000a2: 538, // om-KE
|
||||
0x1d300000: 539, // or
|
||||
0x1d300097: 540, // or-IN
|
||||
0x1d400000: 541, // os
|
||||
0x1d40007b: 542, // os-GE
|
||||
0x1d400104: 543, // os-RU
|
||||
0x1d700000: 544, // pa
|
||||
0x1d705000: 545, // pa-Arab
|
||||
0x1d7050e6: 546, // pa-Arab-PK
|
||||
0x1d72f000: 547, // pa-Guru
|
||||
0x1d72f097: 548, // pa-Guru-IN
|
||||
0x1db00000: 549, // pap
|
||||
0x1e700000: 550, // pl
|
||||
0x1e7000e7: 551, // pl-PL
|
||||
0x1ed00000: 552, // prg
|
||||
0x1ed00001: 553, // prg-001
|
||||
0x1ee00000: 554, // ps
|
||||
0x1ee00023: 555, // ps-AF
|
||||
0x1ef00000: 556, // pt
|
||||
0x1ef00029: 557, // pt-AO
|
||||
0x1ef00040: 558, // pt-BR
|
||||
0x1ef0004d: 559, // pt-CH
|
||||
0x1ef00059: 560, // pt-CV
|
||||
0x1ef00084: 561, // pt-GQ
|
||||
0x1ef00089: 562, // pt-GW
|
||||
0x1ef000b5: 563, // pt-LU
|
||||
0x1ef000c4: 564, // pt-MO
|
||||
0x1ef000cf: 565, // pt-MZ
|
||||
0x1ef000ec: 566, // pt-PT
|
||||
0x1ef00116: 567, // pt-ST
|
||||
0x1ef00124: 568, // pt-TL
|
||||
0x1f100000: 569, // qu
|
||||
0x1f10003e: 570, // qu-BO
|
||||
0x1f100068: 571, // qu-EC
|
||||
0x1f1000e2: 572, // qu-PE
|
||||
0x1fc00000: 573, // rm
|
||||
0x1fc0004d: 574, // rm-CH
|
||||
0x20100000: 575, // rn
|
||||
0x20100039: 576, // rn-BI
|
||||
0x20300000: 577, // ro
|
||||
0x203000ba: 578, // ro-MD
|
||||
0x20300102: 579, // ro-RO
|
||||
0x20500000: 580, // rof
|
||||
0x2050012d: 581, // rof-TZ
|
||||
0x20700000: 582, // ru
|
||||
0x20700046: 583, // ru-BY
|
||||
0x207000a3: 584, // ru-KG
|
||||
0x207000ac: 585, // ru-KZ
|
||||
0x207000ba: 586, // ru-MD
|
||||
0x20700104: 587, // ru-RU
|
||||
0x2070012e: 588, // ru-UA
|
||||
0x20a00000: 589, // rw
|
||||
0x20a00105: 590, // rw-RW
|
||||
0x20b00000: 591, // rwk
|
||||
0x20b0012d: 592, // rwk-TZ
|
||||
0x20f00000: 593, // sah
|
||||
0x20f00104: 594, // sah-RU
|
||||
0x21000000: 595, // saq
|
||||
0x210000a2: 596, // saq-KE
|
||||
0x21400000: 597, // sbp
|
||||
0x2140012d: 598, // sbp-TZ
|
||||
0x21c00000: 599, // sdh
|
||||
0x21d00000: 600, // se
|
||||
0x21d00070: 601, // se-FI
|
||||
0x21d000d8: 602, // se-NO
|
||||
0x21d0010a: 603, // se-SE
|
||||
0x21f00000: 604, // seh
|
||||
0x21f000cf: 605, // seh-MZ
|
||||
0x22100000: 606, // ses
|
||||
0x221000c1: 607, // ses-ML
|
||||
0x22200000: 608, // sg
|
||||
0x2220004b: 609, // sg-CF
|
||||
0x22600000: 610, // shi
|
||||
0x22652000: 611, // shi-Latn
|
||||
0x226520b8: 612, // shi-Latn-MA
|
||||
0x226d2000: 613, // shi-Tfng
|
||||
0x226d20b8: 614, // shi-Tfng-MA
|
||||
0x22800000: 615, // si
|
||||
0x228000b1: 616, // si-LK
|
||||
0x22a00000: 617, // sk
|
||||
0x22a0010f: 618, // sk-SK
|
||||
0x22c00000: 619, // sl
|
||||
0x22c0010d: 620, // sl-SI
|
||||
0x23000000: 621, // sma
|
||||
0x23100000: 622, // smi
|
||||
0x23200000: 623, // smj
|
||||
0x23300000: 624, // smn
|
||||
0x23300070: 625, // smn-FI
|
||||
0x23500000: 626, // sms
|
||||
0x23600000: 627, // sn
|
||||
0x23600161: 628, // sn-ZW
|
||||
0x23800000: 629, // so
|
||||
0x23800061: 630, // so-DJ
|
||||
0x2380006e: 631, // so-ET
|
||||
0x238000a2: 632, // so-KE
|
||||
0x23800113: 633, // so-SO
|
||||
0x23a00000: 634, // sq
|
||||
0x23a00026: 635, // sq-AL
|
||||
0x23a000c0: 636, // sq-MK
|
||||
0x23a0014a: 637, // sq-XK
|
||||
0x23b00000: 638, // sr
|
||||
0x23b1e000: 639, // sr-Cyrl
|
||||
0x23b1e032: 640, // sr-Cyrl-BA
|
||||
0x23b1e0bb: 641, // sr-Cyrl-ME
|
||||
0x23b1e103: 642, // sr-Cyrl-RS
|
||||
0x23b1e14a: 643, // sr-Cyrl-XK
|
||||
0x23b52000: 644, // sr-Latn
|
||||
0x23b52032: 645, // sr-Latn-BA
|
||||
0x23b520bb: 646, // sr-Latn-ME
|
||||
0x23b52103: 647, // sr-Latn-RS
|
||||
0x23b5214a: 648, // sr-Latn-XK
|
||||
0x24000000: 649, // ss
|
||||
0x24100000: 650, // ssy
|
||||
0x24200000: 651, // st
|
||||
0x24700000: 652, // sv
|
||||
0x24700030: 653, // sv-AX
|
||||
0x24700070: 654, // sv-FI
|
||||
0x2470010a: 655, // sv-SE
|
||||
0x24800000: 656, // sw
|
||||
0x2480004a: 657, // sw-CD
|
||||
0x248000a2: 658, // sw-KE
|
||||
0x2480012d: 659, // sw-TZ
|
||||
0x2480012f: 660, // sw-UG
|
||||
0x24f00000: 661, // syr
|
||||
0x25100000: 662, // ta
|
||||
0x25100097: 663, // ta-IN
|
||||
0x251000b1: 664, // ta-LK
|
||||
0x251000ce: 665, // ta-MY
|
||||
0x2510010b: 666, // ta-SG
|
||||
0x25800000: 667, // te
|
||||
0x25800097: 668, // te-IN
|
||||
0x25a00000: 669, // teo
|
||||
0x25a000a2: 670, // teo-KE
|
||||
0x25a0012f: 671, // teo-UG
|
||||
0x25d00000: 672, // th
|
||||
0x25d00121: 673, // th-TH
|
||||
0x26100000: 674, // ti
|
||||
0x2610006c: 675, // ti-ER
|
||||
0x2610006e: 676, // ti-ET
|
||||
0x26200000: 677, // tig
|
||||
0x26400000: 678, // tk
|
||||
0x26400125: 679, // tk-TM
|
||||
0x26b00000: 680, // tn
|
||||
0x26c00000: 681, // to
|
||||
0x26c00127: 682, // to-TO
|
||||
0x26f00000: 683, // tr
|
||||
0x26f0005c: 684, // tr-CY
|
||||
0x26f00129: 685, // tr-TR
|
||||
0x27200000: 686, // ts
|
||||
0x27e00000: 687, // twq
|
||||
0x27e000d2: 688, // twq-NE
|
||||
0x28200000: 689, // tzm
|
||||
0x282000b8: 690, // tzm-MA
|
||||
0x28400000: 691, // ug
|
||||
0x28400052: 692, // ug-CN
|
||||
0x28600000: 693, // uk
|
||||
0x2860012e: 694, // uk-UA
|
||||
0x28c00000: 695, // ur
|
||||
0x28c00097: 696, // ur-IN
|
||||
0x28c000e6: 697, // ur-PK
|
||||
0x28d00000: 698, // uz
|
||||
0x28d05000: 699, // uz-Arab
|
||||
0x28d05023: 700, // uz-Arab-AF
|
||||
0x28d1e000: 701, // uz-Cyrl
|
||||
0x28d1e134: 702, // uz-Cyrl-UZ
|
||||
0x28d52000: 703, // uz-Latn
|
||||
0x28d52134: 704, // uz-Latn-UZ
|
||||
0x28e00000: 705, // vai
|
||||
0x28e52000: 706, // vai-Latn
|
||||
0x28e520b2: 707, // vai-Latn-LR
|
||||
0x28ed9000: 708, // vai-Vaii
|
||||
0x28ed90b2: 709, // vai-Vaii-LR
|
||||
0x28f00000: 710, // ve
|
||||
0x29200000: 711, // vi
|
||||
0x2920013b: 712, // vi-VN
|
||||
0x29700000: 713, // vo
|
||||
0x29700001: 714, // vo-001
|
||||
0x29a00000: 715, // vun
|
||||
0x29a0012d: 716, // vun-TZ
|
||||
0x29b00000: 717, // wa
|
||||
0x29c00000: 718, // wae
|
||||
0x29c0004d: 719, // wae-CH
|
||||
0x2a400000: 720, // wo
|
||||
0x2a900000: 721, // xh
|
||||
0x2b100000: 722, // xog
|
||||
0x2b10012f: 723, // xog-UG
|
||||
0x2b700000: 724, // yav
|
||||
0x2b700051: 725, // yav-CM
|
||||
0x2b900000: 726, // yi
|
||||
0x2b900001: 727, // yi-001
|
||||
0x2ba00000: 728, // yo
|
||||
0x2ba0003a: 729, // yo-BJ
|
||||
0x2ba000d4: 730, // yo-NG
|
||||
0x2bd00000: 731, // yue
|
||||
0x2bd0008b: 732, // yue-HK
|
||||
0x2c300000: 733, // zgh
|
||||
0x2c3000b8: 734, // zgh-MA
|
||||
0x2c400000: 735, // zh
|
||||
0x2c434000: 736, // zh-Hans
|
||||
0x2c434052: 737, // zh-Hans-CN
|
||||
0x2c43408b: 738, // zh-Hans-HK
|
||||
0x2c4340c4: 739, // zh-Hans-MO
|
||||
0x2c43410b: 740, // zh-Hans-SG
|
||||
0x2c435000: 741, // zh-Hant
|
||||
0x2c43508b: 742, // zh-Hant-HK
|
||||
0x2c4350c4: 743, // zh-Hant-MO
|
||||
0x2c43512c: 744, // zh-Hant-TW
|
||||
0x2c600000: 745, // zu
|
||||
0x2c60015e: 746, // zu-ZA
|
||||
}
|
||||
|
||||
// Total table size 4550 bytes (4KiB); checksum: B6D49547
|
File diff suppressed because it is too large
|
@ -1,23 +0,0 @@
language: go
sudo: false

go:
  - 1.1.2
  - 1.2.2
  - 1.3.3
  - 1.4
  - 1.5.4
  - 1.6.2
  - tip

matrix:
  allow_failures:
    - go: tip

before_script:
  - go get github.com/meatballhat/gfmxr/...

script:
  - go vet ./...
  - go test -v ./...
  - gfmxr -c $(grep -c 'package main' README.md) -s README.md
@ -1,21 +0,0 @@
Copyright (C) 2013 Jeremy Saenz
All Rights Reserved.

MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,579 +0,0 @@
|
|||
[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli)
|
||||
[![Build Status](https://travis-ci.org/codegangsta/cli.svg?branch=master)](https://travis-ci.org/codegangsta/cli)
|
||||
[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli)
|
||||
[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-codegangsta-cli)
|
||||
[![Go Report Card](https://goreportcard.com/badge/codegangsta/cli)](https://goreportcard.com/report/codegangsta/cli)
|
||||
|
||||
# cli
|
||||
|
||||
cli is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
|
||||
|
||||
## Overview
|
||||
|
||||
Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
|
||||
|
||||
**This is where cli comes into play.** cli makes command line programming fun, organized, and expressive!
|
||||
|
||||
## Installation
|
||||
|
||||
Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
|
||||
|
||||
To install cli, simply run:
|
||||
```
|
||||
$ go get github.com/codegangsta/cli
|
||||
```
|
||||
|
||||
Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used:
|
||||
```
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
One of the philosophies behind cli is that an API should be playful and full of discovery. So a cli app can be as little as one line of code in `main()`.
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cli.NewApp().Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
|
||||
|
||||
<!-- {
|
||||
"output": "boom! I say!"
|
||||
} -->
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "boom"
|
||||
app.Usage = "make an explosive entrance"
|
||||
app.Action = func(c *cli.Context) error {
|
||||
fmt.Println("boom! I say!")
|
||||
return nil
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below.
|
||||
|
||||
## Example
|
||||
|
||||
Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness!
|
||||
|
||||
Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it:
|
||||
|
||||
<!-- {
|
||||
"output": "Hello friend!"
|
||||
} -->
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.Usage = "fight the loneliness!"
|
||||
app.Action = func(c *cli.Context) error {
|
||||
fmt.Println("Hello friend!")
|
||||
return nil
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Install our command to the `$GOPATH/bin` directory:
|
||||
|
||||
```
|
||||
$ go install
|
||||
```
|
||||
|
||||
Finally run our new command:
|
||||
|
||||
```
|
||||
$ greet
|
||||
Hello friend!
|
||||
```
|
||||
|
||||
cli also generates neat help text:
|
||||
|
||||
```
|
||||
$ greet help
|
||||
NAME:
|
||||
greet - fight the loneliness!
|
||||
|
||||
USAGE:
|
||||
greet [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
0.0.0
|
||||
|
||||
COMMANDS:
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
GLOBAL OPTIONS
|
||||
--version Shows version information
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
You can lookup arguments by calling the `Args` function on `cli.Context`.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Action = func(c *cli.Context) error {
|
||||
fmt.Println("Hello", c.Args()[0])
|
||||
return nil
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Flags
|
||||
|
||||
Setting and querying flags is simple.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) error {
|
||||
name := "someone"
|
||||
if c.NArg() > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if c.String("lang") == "spanish" {
|
||||
fmt.Println("Hola", name)
|
||||
} else {
|
||||
fmt.Println("Hello", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
You can also set a destination variable for a flag, to which the content will be scanned.
|
||||
|
||||
``` go
|
||||
...
|
||||
var language string
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
Destination: &language,
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) error {
|
||||
name := "someone"
|
||||
if c.NArg() > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if language == "spanish" {
|
||||
fmt.Println("Hola", name)
|
||||
} else {
|
||||
fmt.Println("Hello", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
See full list of flags at http://godoc.org/github.com/codegangsta/cli
|
||||
|
||||
#### Placeholder Values
|
||||
|
||||
Sometimes it's useful to specify a flag's value within the usage string itself. Such placeholders are
|
||||
indicated with back quotes.
|
||||
|
||||
For example this:
|
||||
|
||||
```go
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
Usage: "Load configuration from `FILE`",
|
||||
}
|
||||
```
|
||||
|
||||
Will result in help output like:
|
||||
|
||||
```
|
||||
--config FILE, -c FILE Load configuration from FILE
|
||||
```
|
||||
|
||||
Note that only the first placeholder is used. Subsequent back-quoted words will be left as-is.
|
||||
|
||||
#### Alternate Names
|
||||
|
||||
You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error.
|
||||
|
||||
#### Values from the Environment
|
||||
|
||||
You can also have the default value set from the environment via `EnvVar`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "APP_LANG",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
#### Values from alternate input sources (YAML and others)
|
||||
|
||||
There is a separate package altsrc that adds support for getting flag values from other input sources like YAML.
|
||||
|
||||
To get values for a flag from an alternate input source, wrap an existing cli.Flag as shown below:
|
||||
|
||||
``` go
|
||||
altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
|
||||
```
|
||||
|
||||
Initialization must also occur for these flags. Below is an example that initializes input from a YAML file.
|
||||
|
||||
``` go
|
||||
command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
|
||||
```
|
||||
|
||||
The code above will use the "load" string as a flag name to get the file name of a yaml file from the cli.Context.
|
||||
It will then use that file name to initialize the yaml input source for any flags that are defined on that command.
|
||||
Note that the "load" flag used here would also have to be defined on the command's flags in order for this code snippet to work.
|
||||
|
||||
Currently only YAML files are supported but developers can add support for other input sources by implementing the
|
||||
altsrc.InputSourceContext for their given sources.
|
||||
|
||||
Here is a more complete sample of a command using YAML support:
|
||||
|
||||
``` go
|
||||
command := &cli.Command{
|
||||
Name: "test-cmd",
|
||||
Aliases: []string{"tc"},
|
||||
Usage: "this is for testing",
|
||||
Description: "testing",
|
||||
Action: func(c *cli.Context) error {
|
||||
// Action to run
|
||||
return nil
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
NewIntFlag(cli.IntFlag{Name: "test"}),
|
||||
cli.StringFlag{Name: "load"}},
|
||||
}
|
||||
command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
|
||||
err := command.Run(c)
|
||||
```
|
||||
|
||||
### Subcommands
|
||||
|
||||
Subcommands can be defined for a more git-like command line app.
|
||||
|
||||
```go
|
||||
...
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "add",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "add a task to the list",
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("added task: ", c.Args().First())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "complete",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "complete a task on the list",
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("completed task: ", c.Args().First())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "template",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "options for task templates",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "add a new template",
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("new task template: ", c.Args().First())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "remove an existing template",
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("removed task template: ", c.Args().First())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Subcommands categories
|
||||
|
||||
For additional organization in apps that have many subcommands, you can
|
||||
associate a category for each command to group them together in the help
|
||||
output.
|
||||
|
||||
E.g.
|
||||
|
||||
```go
|
||||
...
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "noop",
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Category: "template",
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Category: "template",
|
||||
},
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
Will include:
|
||||
|
||||
```
|
||||
...
|
||||
COMMANDS:
|
||||
noop
|
||||
|
||||
Template actions:
|
||||
add
|
||||
remove
|
||||
...
|
||||
```
|
||||
|
||||
### Exit code
|
||||
|
||||
Calling `App.Run` will not automatically call `os.Exit`, which means that by
|
||||
default the exit code will "fall through" to being `0`. An explicit exit code
|
||||
may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a
|
||||
`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.:
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolTFlag{
|
||||
Name: "ginger-crouton",
|
||||
Usage: "is it in the soup?",
|
||||
},
|
||||
}
|
||||
app.Action = func(ctx *cli.Context) error {
|
||||
if !ctx.Bool("ginger-crouton") {
|
||||
return cli.NewExitError("it is not in the soup", 86)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
### Bash Completion
|
||||
|
||||
You can enable completion commands by setting the `EnableBashCompletion`
|
||||
flag on the `App` object. By default, this setting will only auto-complete to
|
||||
show an app's subcommands, but you can write your own completion methods for
|
||||
the App or its subcommands.
|
||||
|
||||
```go
|
||||
...
|
||||
var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "complete",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "complete a task on the list",
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("completed task: ", c.Args().First())
|
||||
return nil
|
||||
},
|
||||
BashComplete: func(c *cli.Context) {
|
||||
// This will complete if no args are passed
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
}
|
||||
for _, t := range tasks {
|
||||
fmt.Println(t)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
#### To Enable
|
||||
|
||||
Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
|
||||
setting the `PROG` variable to the name of your program:
|
||||
|
||||
`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
|
||||
|
||||
#### To Distribute
|
||||
|
||||
Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
|
||||
it to the name of the program you wish to add autocomplete support for (or
|
||||
automatically install it there if you are distributing a package). Don't forget
|
||||
to source the file to make it active in the current shell.
|
||||
|
||||
```
|
||||
sudo cp src/bash_autocomplete /etc/bash_completion.d/<myprogram>
|
||||
source /etc/bash_completion.d/<myprogram>
|
||||
```
|
||||
|
||||
Alternatively, you can just document that users should source the generic
|
||||
`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
|
||||
to the name of their program (as above).
|
||||
|
||||
### Generated Help Text Customization
|
||||
|
||||
All of the help text generation may be customized, and at multiple levels. The
|
||||
templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and
|
||||
`SubcommandHelpTemplate` which may be reassigned or augmented, and full override
|
||||
is possible by assigning a compatible func to the `cli.HelpPrinter` variable,
|
||||
e.g.:
|
||||
|
||||
<!-- {
|
||||
"output": "Ha HA. I pwnd the help!!1"
|
||||
} -->
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// EXAMPLE: Append to an existing template
|
||||
cli.AppHelpTemplate = fmt.Sprintf(`%s
|
||||
|
||||
WEBSITE: http://awesometown.example.com
|
||||
|
||||
SUPPORT: support@awesometown.example.com
|
||||
|
||||
`, cli.AppHelpTemplate)
|
||||
|
||||
// EXAMPLE: Override a template
|
||||
cli.AppHelpTemplate = `NAME:
|
||||
{{.Name}} - {{.Usage}}
|
||||
USAGE:
|
||||
{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command
|
||||
[command options]{{end}} {{if
|
||||
.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
|
||||
{{if len .Authors}}
|
||||
AUTHOR(S):
|
||||
{{range .Authors}}{{ . }}{{end}}
|
||||
{{end}}{{if .Commands}}
|
||||
COMMANDS:
|
||||
{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"
|
||||
}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
|
||||
GLOBAL OPTIONS:
|
||||
{{range .VisibleFlags}}{{.}}
|
||||
{{end}}{{end}}{{if .Copyright }}
|
||||
COPYRIGHT:
|
||||
{{.Copyright}}
|
||||
{{end}}{{if .Version}}
|
||||
VERSION:
|
||||
{{.Version}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
// EXAMPLE: Replace the `HelpPrinter` func
|
||||
cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
|
||||
fmt.Println("Ha HA. I pwnd the help!!1")
|
||||
}
|
||||
|
||||
cli.NewApp().Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
|
||||
|
||||
If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together.
|
||||
|
||||
If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out.
|
|
@ -1,16 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

install:
  - go version
  - go env

build_script:
  - cd %APPVEYOR_BUILD_FOLDER%
  - go vet ./...
  - go test -v ./...

test: off

deploy: off
@ -34,7 +34,7 @@
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
package context // import "golang.org/x/net/context"

import "time"
@ -1,3 +1,18 @@
# Vendored Dependencies

Dependencies are almost all vendored in at the standard Go `/vendor` path. This allows
people to build go-ethereum using the standard toolchain without any particular package
manager. It also plays nicely with `go get`, which requires no external tooling at all.

The one dependent package missing from `vendor` is `golang.org/x/net/context`. As this
package is exposed via public library APIs, it must not be vendored, since dependent
code wouldn't be able to instantiate it.

To nonetheless allow reproducible builds of go-ethereum that don't need network access
at build time to fetch `golang.org/x/net/context`, a version was copied into our repo
at the very specific `/build/_vendor` path, which is added automatically by all CI build
scripts and the makefile too.
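As an illustration only (not part of the README text above): a minimal sketch of such a standard-toolchain build. The `cmd/geth` target is assumed here purely for concreteness.

```
# go get fetches the repository, picks up the in-repo vendor/ folder
# automatically, and resolves the one un-vendored package
# (golang.org/x/net/context) over the network as usual.
go get github.com/ethereum/go-ethereum/cmd/geth
```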
# Debian Packaging

Tagged releases and develop branch commits are available as installable Debian packages
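Not part of the diff: purely as an illustration of what installing such a package typically looks like on a Debian/Ubuntu system. The repository name below is an assumption; check the project's install documentation for the authoritative source.

```
# Assumed PPA name -- verify against the official install docs.
sudo add-apt-repository -y ppa:ethereum/ethereum
sudo apt-get update
sudo apt-get install ethereum
```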
build/ci.go
@ -141,7 +141,6 @@ func doInstall(cmdline []string) {
		log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
		os.Exit(1)
	}

	// Compile packages given as arguments, or everything if there are no arguments.
	packages := []string{"./..."}
	if flag.NArg() > 0 {

@ -178,6 +177,7 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
	cmd := exec.Command(gocmd, subcmd)
	cmd.Args = append(cmd.Args, args...)
	cmd.Env = []string{
		"GO15VENDOREXPERIMENT=1",
		"GOPATH=" + build.GOPATH(),
		"GOBIN=" + GOBIN,
	}

@ -200,11 +200,24 @@ func doTest(cmdline []string) {
		coverage = flag.Bool("coverage", false, "Whether to record code coverage")
	)
	flag.CommandLine.Parse(cmdline)

	packages := []string{"./..."}
	if len(flag.CommandLine.Args()) > 0 {
		packages = flag.CommandLine.Args()
	}
	if len(packages) == 1 && packages[0] == "./..." {
		// Resolve ./... manually since go vet will fail on vendored stuff
		out, err := goTool("list", "./...").CombinedOutput()
		if err != nil {
			log.Fatalf("package listing failed: %v\n%s", err, string(out))
		}
		packages = []string{}
		for _, line := range strings.Split(string(out), "\n") {
			if !strings.Contains(line, "vendor") {
				packages = append(packages, strings.TrimSpace(line))
			}
		}
	}
	// Run analysis tools before the tests.
	if *vet {
		build.MustRun(goTool("vet", packages...))
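Not part of the diff itself: a minimal shell sketch of the manual equivalent of the vendor filtering that `doTest` performs above, assuming a Unix shell and a Go toolchain with vendor support.

```
# Hypothetical manual equivalent of the filtering above: vet and test
# every package in the repository except those under the vendor tree.
go vet $(go list ./... | grep -v vendor)
go test $(go list ./... | grep -v vendor)
```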
@ -82,22 +82,6 @@ The makedag command generates an ethash DAG in /tmp/dag.
|
|||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: gpuinfo,
|
||||
Name: "gpuinfo",
|
||||
Usage: "gpuinfo",
|
||||
Description: `
|
||||
Prints OpenCL device info for all found GPUs.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: gpubench,
|
||||
Name: "gpubench",
|
||||
Usage: "benchmark GPU",
|
||||
Description: `
|
||||
Runs quick benchmark on first GPU found.
|
||||
`,
|
||||
},
|
||||
{
|
||||
|
@ -147,7 +131,6 @@ participating.
|
|||
utils.OpposeDAOFork,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.MiningGPUFlag,
|
||||
utils.AutoDAGFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.NATFlag,
|
||||
|
@ -312,7 +295,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
|||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
}
|
||||
if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name), ctx.GlobalString(utils.MiningGPUFlag.Name)); err != nil {
|
||||
if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -348,31 +331,6 @@ func makedag(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func gpuinfo(ctx *cli.Context) error {
|
||||
eth.PrintOpenCLDevices()
|
||||
return nil
|
||||
}
|
||||
|
||||
func gpubench(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
wrongArgs := func() {
|
||||
utils.Fatalf(`Usage: geth gpubench <gpu number>`)
|
||||
}
|
||||
switch {
|
||||
case len(args) == 1:
|
||||
n, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
wrongArgs()
|
||||
}
|
||||
eth.GPUBench(n)
|
||||
case len(args) == 0:
|
||||
eth.GPUBench(0)
|
||||
default:
|
||||
wrongArgs()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func version(c *cli.Context) error {
|
||||
fmt.Println(strings.Title(clientIdentifier))
|
||||
fmt.Println("Version:", utils.Version)
|
||||
|
|
|
@ -128,7 +128,6 @@ var AppHelpFlagGroups = []flagGroup{
|
|||
Flags: []cli.Flag{
|
||||
utils.MiningEnabledFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningGPUFlag,
|
||||
utils.AutoDAGFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
|
|
|
@ -170,7 +170,6 @@ var (
|
|||
Usage: "Updates the chain rules to oppose the DAO hard-fork",
|
||||
}
|
||||
// Miner settings
|
||||
// TODO: refactor CPU vs GPU mining flags
|
||||
MiningEnabledFlag = cli.BoolFlag{
|
||||
Name: "mine",
|
||||
Usage: "Enable mining",
|
||||
|
@ -180,10 +179,6 @@ var (
|
|||
Usage: "Number of CPU threads to use for mining",
|
||||
Value: runtime.NumCPU(),
|
||||
}
|
||||
MiningGPUFlag = cli.StringFlag{
|
||||
Name: "minergpus",
|
||||
Usage: "List of GPUs to use for mining (e.g. '0,1' will use the first two GPUs found)",
|
||||
}
|
||||
TargetGasLimitFlag = cli.StringFlag{
|
||||
Name: "targetgaslimit",
|
||||
Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
|
||||
|
|
|
@ -1,262 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package natspec
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/httpclient"
|
||||
"github.com/ethereum/go-ethereum/common/registrar"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/xeth"
|
||||
"github.com/robertkrimen/otto"
|
||||
)
|
||||
|
||||
type abi2method map[[8]byte]*method
|
||||
|
||||
type NatSpec struct {
|
||||
jsvm *otto.Otto
|
||||
abiDocJson []byte
|
||||
userDoc userDoc
|
||||
tx, data string
|
||||
}
|
||||
|
||||
// main entry point for getting the natspec notice for a transaction
|
||||
// the implementation is frontend friendly in that it always gives back
|
||||
// a notice that is safe to display
|
||||
// :FIXME: the second return value is an error, which can be used to fine-tune behaviour
|
||||
func GetNotice(xeth *xeth.XEth, tx string, http *httpclient.HTTPClient) (notice string) {
|
||||
ns, err := New(xeth, tx, http)
|
||||
if err != nil {
|
||||
if ns == nil {
|
||||
return getFallbackNotice(fmt.Sprintf("no NatSpec info found for contract: %v", err), tx)
|
||||
} else {
|
||||
return getFallbackNotice(fmt.Sprintf("invalid NatSpec info: %v", err), tx)
|
||||
}
|
||||
}
|
||||
|
||||
notice, err = ns.Notice()
|
||||
if err != nil {
|
||||
return getFallbackNotice(fmt.Sprintf("NatSpec notice error: %v", err), tx)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getFallbackNotice(comment, tx string) string {
|
||||
return fmt.Sprintf("About to submit transaction (%s): %s", comment, tx)
|
||||
}
|
||||
|
||||
type transaction struct {
|
||||
To string `json:"to"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
type jsonTx struct {
|
||||
Params []transaction `json:"params"`
|
||||
}
|
||||
|
||||
type contractInfo struct {
|
||||
Source string `json:"source"`
|
||||
Language string `json:"language"`
|
||||
Version string `json:"compilerVersion"`
|
||||
AbiDefinition json.RawMessage `json:"abiDefinition"`
|
||||
UserDoc userDoc `json:"userDoc"`
|
||||
DeveloperDoc json.RawMessage `json:"developerDoc"`
|
||||
}
|
||||
|
||||
func New(xeth *xeth.XEth, jsontx string, http *httpclient.HTTPClient) (self *NatSpec, err error) {
|
||||
|
||||
// extract contract address from tx
|
||||
var tx jsonTx
|
||||
err = json.Unmarshal([]byte(jsontx), &tx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
t := tx.Params[0]
|
||||
contractAddress := t.To
|
||||
|
||||
content, err := FetchDocsForContract(contractAddress, xeth, http)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
self, err = NewWithDocs(content, jsontx, t.Data)
|
||||
return
|
||||
}
|
||||
|
||||
// also called by admin.contractInfo.get
|
||||
func FetchDocsForContract(contractAddress string, xeth *xeth.XEth, client *httpclient.HTTPClient) (content []byte, err error) {
|
||||
// retrieve contract hash from state
|
||||
codehex := xeth.CodeAt(contractAddress)
|
||||
codeb := xeth.CodeAtBytes(contractAddress)
|
||||
|
||||
if codehex == "0x" {
|
||||
err = fmt.Errorf("contract (%v) not found", contractAddress)
|
||||
return
|
||||
}
|
||||
codehash := common.BytesToHash(crypto.Keccak256(codeb))
|
||||
// set up nameresolver with natspecreg + urlhint contract addresses
|
||||
reg := registrar.New(xeth)
|
||||
|
||||
// resolve host via HashReg/UrlHint Resolver
|
||||
hash, err := reg.HashToHash(codehash)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if client.HasScheme("bzz") {
|
||||
content, err = client.Get("bzz://"+hash.Hex()[2:], "")
|
||||
if err == nil { // non-fatal
|
||||
return
|
||||
}
|
||||
err = nil
|
||||
//falling back to urlhint
|
||||
}
|
||||
|
||||
uri, err := reg.HashToUrl(hash)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// get content via http client and authenticate content using hash
|
||||
content, err = client.GetAuthContent(uri, hash)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewWithDocs(infoDoc []byte, tx string, data string) (self *NatSpec, err error) {
|
||||
|
||||
var contract contractInfo
|
||||
err = json.Unmarshal(infoDoc, &contract)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
self = &NatSpec{
|
||||
jsvm: otto.New(),
|
||||
abiDocJson: []byte(contract.AbiDefinition),
|
||||
userDoc: contract.UserDoc,
|
||||
tx: tx,
|
||||
data: data,
|
||||
}
|
||||
|
||||
// load and require natspec js (but it is meant to be a protected environment)
|
||||
_, err = self.jsvm.Run(natspecJS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = self.jsvm.Run("var natspec = require('natspec');")
|
||||
return
|
||||
}
|
||||
|
||||
// type abiDoc []method
|
||||
|
||||
// type method struct {
|
||||
// Name string `json:name`
|
||||
// Inputs []input `json:inputs`
|
||||
// abiKey [8]byte
|
||||
// }
|
||||
|
||||
// type input struct {
|
||||
// Name string `json:name`
|
||||
// Type string `json:type`
|
||||
// }
|
||||
|
||||
// json skeleton for abi doc (contract method definitions)
|
||||
type method struct {
|
||||
Notice string `json:notice`
|
||||
name string
|
||||
}
|
||||
|
||||
type userDoc struct {
|
||||
Methods map[string]*method `json:methods`
|
||||
}
|
||||
|
||||
func (self *NatSpec) makeAbi2method(abiKey [8]byte) (meth *method) {
|
||||
for signature, m := range self.userDoc.Methods {
|
||||
name := strings.Split(signature, "(")[0]
|
||||
hash := []byte(common.Bytes2Hex(crypto.Keccak256([]byte(signature))))
|
||||
var key [8]byte
|
||||
copy(key[:], hash[:8])
|
||||
if bytes.Equal(key[:], abiKey[:]) {
|
||||
meth = m
|
||||
meth.name = name
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (self *NatSpec) Notice() (notice string, err error) {
|
||||
var abiKey [8]byte
|
||||
if len(self.data) < 10 {
|
||||
err = fmt.Errorf("Invalid transaction data")
|
||||
return
|
||||
}
|
||||
copy(abiKey[:], self.data[2:10])
|
||||
meth := self.makeAbi2method(abiKey)
|
||||
|
||||
if meth == nil {
|
||||
err = fmt.Errorf("abi key does not match any method")
|
||||
return
|
||||
}
|
||||
notice, err = self.noticeForMethod(self.tx, meth.name, meth.Notice)
|
||||
return
|
||||
}
|
||||
|
||||
func (self *NatSpec) noticeForMethod(tx string, name, expression string) (notice string, err error) {
|
||||
|
||||
if _, err = self.jsvm.Run("var transaction = " + tx + ";"); err != nil {
|
||||
return "", fmt.Errorf("natspec.js error setting transaction: %v", err)
|
||||
}
|
||||
|
||||
if _, err = self.jsvm.Run("var abi = " + string(self.abiDocJson) + ";"); err != nil {
|
||||
return "", fmt.Errorf("natspec.js error setting abi: %v", err)
|
||||
}
|
||||
|
||||
if _, err = self.jsvm.Run("var method = '" + name + "';"); err != nil {
|
||||
return "", fmt.Errorf("natspec.js error setting method: %v", err)
|
||||
}
|
||||
|
||||
if _, err = self.jsvm.Run("var expression = \"" + expression + "\";"); err != nil {
|
||||
return "", fmt.Errorf("natspec.js error setting expression: %v", err)
|
||||
}
|
||||
|
||||
self.jsvm.Run("var call = {method: method,abi: abi,transaction: transaction};")
|
||||
value, err := self.jsvm.Run("natspec.evaluateExpression(expression, call);")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("natspec.js error evaluating expression: %v", err)
|
||||
}
|
||||
evalError := "Natspec evaluation failed, wrong input params"
|
||||
if value.String() == evalError {
|
||||
return "", fmt.Errorf("natspec.js error evaluating expression: wrong input params in expression '%s'", expression)
|
||||
}
|
||||
if len(value.String()) == 0 {
|
||||
return "", fmt.Errorf("natspec.js error evaluating expression")
|
||||
}
|
||||
|
||||
return value.String(), nil
|
||||
|
||||
}
|
|
@ -1,357 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package natspec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/httpclient"
|
||||
"github.com/ethereum/go-ethereum/common/registrar"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
xe "github.com/ethereum/go-ethereum/xeth"
|
||||
)
|
||||
|
||||
const (
|
||||
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
testBalance = "10000000000000000000"
|
||||
testKey = "e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674"
|
||||
|
||||
testFileName = "long_file_name_for_testing_registration_of_URLs_longer_than_32_bytes.content"
|
||||
|
||||
testNotice = "Register key `utils.toHex(_key)` <- content `utils.toHex(_content)`"
|
||||
|
||||
testExpNotice = "Register key 0xadd1a7d961cff0242089674ec2ef6fca671ab15e1fe80e38859fc815b98d88ab <- content 0xb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7"
|
||||
|
||||
testExpNotice2 = `About to submit transaction (NatSpec notice error: abi key does not match any method): {"params":[{"to":"%s","data": "0x31e12c20"}]}`
|
||||
|
||||
testExpNotice3 = `About to submit transaction (no NatSpec info found for contract: HashToHash: content hash not found for '0x1392c62d05b2d149e22a339c531157ae06b44d39a674cce500064b12b9aeb019'): {"params":[{"to":"%s","data": "0x300a3bbfb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066696c653a2f2f2f746573742e636f6e74656e74"}]}`
|
||||
)
|
||||
|
||||
const (
|
||||
testUserDoc = `
|
||||
{
|
||||
"methods": {
|
||||
"register(uint256,uint256)": {
|
||||
"notice": "` + testNotice + `"
|
||||
}
|
||||
},
|
||||
"invariants": [
|
||||
{ "notice": "" }
|
||||
],
|
||||
"construction": [
|
||||
{ "notice": "" }
|
||||
]
|
||||
}
|
||||
`
|
||||
testAbiDefinition = `
|
||||
[{
|
||||
"name": "register",
|
||||
"constant": false,
|
||||
"type": "function",
|
||||
"inputs": [{
|
||||
"name": "_key",
|
||||
"type": "uint256"
|
||||
}, {
|
||||
"name": "_content",
|
||||
"type": "uint256"
|
||||
}],
|
||||
"outputs": []
|
||||
}]
|
||||
`
|
||||
|
||||
testContractInfo = `
|
||||
{
|
||||
"userDoc": ` + testUserDoc + `,
|
||||
"abiDefinition": ` + testAbiDefinition + `
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
type testFrontend struct {
|
||||
t *testing.T
|
||||
ethereum *eth.Ethereum
|
||||
xeth *xe.XEth
|
||||
wait chan *big.Int
|
||||
lastConfirm string
|
||||
wantNatSpec bool
|
||||
}
|
||||
|
||||
func (self *testFrontend) AskPassword() (string, bool) {
|
||||
return "", true
|
||||
}
|
||||
|
||||
func (self *testFrontend) UnlockAccount(acc []byte) bool {
|
||||
self.ethereum.AccountManager().Unlock(common.BytesToAddress(acc), "password")
|
||||
return true
|
||||
}
|
||||
|
||||
func (self *testFrontend) ConfirmTransaction(tx string) bool {
|
||||
if self.wantNatSpec {
|
||||
client := httpclient.New("/tmp/")
|
||||
self.lastConfirm = GetNotice(self.xeth, tx, client)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
|
||||
|
||||
tmp, err := ioutil.TempDir("", "natspec-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
addr := common.HexToAddress(testAddress)
|
||||
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, common.String2Big(testBalance)})
|
||||
ks := crypto.NewKeyStorePassphrase(filepath.Join(tmp, "keystore"), crypto.LightScryptN, crypto.LightScryptP)
|
||||
am := accounts.NewManager(ks)
|
||||
keyb, err := crypto.HexToECDSA(testKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key := crypto.NewKeyFromECDSA(keyb)
|
||||
err = ks.StoreKey(key, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = am.Unlock(key.Address, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// only use minimalistic stack with no networking
|
||||
return eth.New(&node.ServiceContext{EventMux: new(event.TypeMux)}, ð.Config{
|
||||
AccountManager: am,
|
||||
Etherbase: common.HexToAddress(testAddress),
|
||||
PowTest: true,
|
||||
TestGenesisState: db,
|
||||
GpoMinGasPrice: common.Big1,
|
||||
GpobaseCorrectionFactor: 1,
|
||||
GpoMaxGasPrice: common.Big1,
|
||||
})
|
||||
}
|
||||
|
||||
func testInit(t *testing.T) (self *testFrontend) {
|
||||
// initialise and start minimal ethereum stack
|
||||
ethereum, err := testEth(t)
|
||||
if err != nil {
|
||||
t.Errorf("error creating ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
err = ethereum.Start(nil)
|
||||
if err != nil {
|
||||
t.Errorf("error starting ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// mock frontend
|
||||
self = &testFrontend{t: t, ethereum: ethereum}
|
||||
self.xeth = xe.New(nil, self)
|
||||
self.wait = self.xeth.UpdateState()
|
||||
addr, _ := self.ethereum.Etherbase()
|
||||
|
||||
// initialise the registry contracts
|
||||
reg := registrar.New(self.xeth)
|
||||
registrar.GlobalRegistrarAddr = "0x0"
|
||||
|
||||
var txG, txH, txU string
|
||||
txG, err = reg.SetGlobalRegistrar("", addr)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating GlobalRegistrar: %v", err)
|
||||
}
|
||||
if !processTxs(self, t, 1) {
|
||||
t.Fatalf("error mining txs")
|
||||
}
|
||||
recG := self.xeth.GetTxReceipt(common.HexToHash(txG))
|
||||
if recG == nil {
|
||||
t.Fatalf("blockchain error creating GlobalRegistrar")
|
||||
}
|
||||
registrar.GlobalRegistrarAddr = recG.ContractAddress.Hex()
|
||||
|
||||
txH, err = reg.SetHashReg("", addr)
|
||||
if err != nil {
|
||||
t.Errorf("error creating HashReg: %v", err)
|
||||
}
|
||||
if !processTxs(self, t, 1) {
|
||||
t.Errorf("error mining txs")
|
||||
}
|
||||
recH := self.xeth.GetTxReceipt(common.HexToHash(txH))
|
||||
if recH == nil {
|
||||
t.Fatalf("blockchain error creating HashReg")
|
||||
}
|
||||
registrar.HashRegAddr = recH.ContractAddress.Hex()
|
||||
|
||||
txU, err = reg.SetUrlHint("", addr)
|
||||
if err != nil {
|
||||
t.Errorf("error creating UrlHint: %v", err)
|
||||
}
|
||||
if !processTxs(self, t, 1) {
|
||||
t.Errorf("error mining txs")
|
||||
}
|
||||
recU := self.xeth.GetTxReceipt(common.HexToHash(txU))
|
||||
if recU == nil {
|
||||
t.Fatalf("blockchain error creating UrlHint")
|
||||
}
|
||||
registrar.UrlHintAddr = recU.ContractAddress.Hex()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// end to end test
|
||||
func TestNatspecE2E(t *testing.T) {
|
||||
t.Skip()
|
||||
|
||||
tf := testInit(t)
|
||||
defer tf.ethereum.Stop()
|
||||
addr, _ := tf.ethereum.Etherbase()
|
||||
|
||||
// create a contractInfo file (mock cloud-deployed contract metadocs)
|
||||
// incidentally this is the info for the HashReg contract itself
|
||||
ioutil.WriteFile("/tmp/"+testFileName, []byte(testContractInfo), os.ModePerm)
|
||||
dochash := crypto.Keccak256Hash([]byte(testContractInfo))
|
||||
|
||||
// take the codehash for the contract we wanna test
|
||||
codeb := tf.xeth.CodeAtBytes(registrar.HashRegAddr)
|
||||
codehash := crypto.Keccak256Hash(codeb)
|
||||
|
||||
reg := registrar.New(tf.xeth)
|
||||
_, err := reg.SetHashToHash(addr, codehash, dochash)
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
_, err = reg.SetUrlToHash(addr, dochash, "file:///"+testFileName)
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
if !processTxs(tf, t, 5) {
|
||||
return
|
||||
}
|
||||
|
||||
// NatSpec info for register method of HashReg contract installed
|
||||
// now using the same transactions to check confirm messages
|
||||
|
||||
tf.wantNatSpec = true // this is set so now the backend uses natspec confirmation
|
||||
_, err = reg.SetHashToHash(addr, codehash, dochash)
|
||||
if err != nil {
|
||||
t.Errorf("error calling contract registry: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("GlobalRegistrar: %v, HashReg: %v, UrlHint: %v\n", registrar.GlobalRegistrarAddr, registrar.HashRegAddr, registrar.UrlHintAddr)
|
||||
if tf.lastConfirm != testExpNotice {
|
||||
t.Errorf("Wrong confirm message. expected\n'%v', got\n'%v'", testExpNotice, tf.lastConfirm)
|
||||
}
|
||||
|
||||
// test unknown method
|
||||
exp := fmt.Sprintf(testExpNotice2, registrar.HashRegAddr)
|
||||
_, err = reg.SetOwner(addr)
|
||||
if err != nil {
|
||||
t.Errorf("error setting owner: %v", err)
|
||||
}
|
||||
|
||||
if tf.lastConfirm != exp {
|
||||
t.Errorf("Wrong confirm message, expected\n'%v', got\n'%v'", exp, tf.lastConfirm)
|
||||
}
|
||||
|
||||
// test unknown contract
|
||||
exp = fmt.Sprintf(testExpNotice3, registrar.UrlHintAddr)
|
||||
|
||||
_, err = reg.SetUrlToHash(addr, dochash, "file:///test.content")
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
|
||||
if tf.lastConfirm != exp {
|
||||
t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func pendingTransactions(repl *testFrontend, t *testing.T) (txc int64, err error) {
|
||||
txs := repl.ethereum.TxPool().GetTransactions()
|
||||
return int64(len(txs)), nil
|
||||
}
|
||||
|
||||
func processTxs(repl *testFrontend, t *testing.T, expTxc int) bool {
|
||||
var txc int64
|
||||
var err error
|
||||
for i := 0; i < 50; i++ {
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if expTxc < int(txc) {
|
||||
t.Errorf("too many pending transactions: expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
} else if expTxc == int(txc) {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
if int(txc) != expTxc {
|
||||
t.Errorf("incorrect number of pending transactions, expected %v, got %v", expTxc, txc)
|
||||
return false
|
||||
}
|
||||
|
||||
err = repl.ethereum.StartMining(runtime.NumCPU(), "")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error mining: %v", err)
|
||||
return false
|
||||
}
|
||||
defer repl.ethereum.StopMining()
|
||||
|
||||
timer := time.NewTimer(100 * time.Second)
|
||||
height := new(big.Int).Add(repl.xeth.CurrentBlock().Number(), big.NewInt(1))
|
||||
repl.wait <- height
|
||||
select {
|
||||
case <-timer.C:
|
||||
// if times out make sure the xeth loop does not block
|
||||
go func() {
|
||||
select {
|
||||
case repl.wait <- nil:
|
||||
case <-repl.wait:
|
||||
}
|
||||
}()
|
||||
case <-repl.wait:
|
||||
}
|
||||
txc, err = pendingTransactions(repl, t)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error checking pending transactions: %v", err)
|
||||
return false
|
||||
}
|
||||
if txc != 0 {
|
||||
t.Errorf("%d trasactions were not mined", txc)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@@ -1,160 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build ignore

package natspec

import (
	"testing"
)

func makeInfoDoc(desc string) []byte {
	return []byte(`
{
  "source": "contract test { }",
  "language": "Solidity",
  "compilerVersion": "1",
  "userDoc": {
    "methods": {
      "multiply(uint256)": {
        "notice": "` + desc + `"
      },
      "balance(address)": {
        "notice": "` + "`(balanceInmGAV / 1000).fixed(0,3)`" + ` GAV is the total funds available to ` + "`who.address()`." + `"
      }
    },
    "invariants": [
      { "notice": "The sum total amount of GAV in the system is 1 million." }
    ],
    "construction": [
      { "notice": "Endows ` + "`message.caller.address()`" + ` with 1m GAV." }
    ]
  },
  "abiDefinition": [{
    "name": "multiply",
    "constant": false,
    "type": "function",
    "inputs": [{
      "name": "a",
      "type": "uint256"
    }],
    "outputs": [{
      "name": "d",
      "type": "uint256"
    }]
  }]
}`)
}

var data = "0xc6888fa1000000000000000000000000000000000000000000000000000000000000007a"

var tx = `
{
	"params": [{
		"to": "0x8521742d3f456bd237e312d6e30724960f72517a",
		"data": "0xc6888fa1000000000000000000000000000000000000000000000000000000000000007a"
	}],
}
`

func TestNotice(t *testing.T) {

	desc := "Will multiply `a` by 7 and return `a * 7`."
	expected := "Will multiply 122 by 7 and return 854."

	infodoc := makeInfoDoc(desc)
	ns, err := NewWithDocs(infodoc, tx, data)
	if err != nil {
		t.Errorf("New: error: %v", err)
		return
	}

	notice, err := ns.Notice()

	if err != nil {
		t.Errorf("expected no error, got %v", err)
	}

	if notice != expected {
		t.Errorf("incorrect notice. expected %v, got %v", expected, notice)
	}
}

// test missing method
func TestMissingMethod(t *testing.T) {

	desc := "Will multiply `a` by 7 and return `a * 7`."
	expected := "natspec.js error evaluating expression: Natspec evaluation failed, method does not exist"

	infodoc := makeInfoDoc(desc)
	ns, err := NewWithDocs(infodoc, tx, data)
	if err != nil {
		t.Errorf("New: error: %v", err)
	}

	notice, err := ns.noticeForMethod(tx, "missing_method", "")

	if err == nil {
		t.Errorf("expected error, got nothing (notice: '%v')", notice)
	} else {
		if err.Error() != expected {
			t.Errorf("expected error '%s' got '%v' (notice: '%v')", expected, err, notice)
		}
	}
}

// test invalid desc

func TestInvalidDesc(t *testing.T) {

	desc := "Will multiply 122 by \"7\" and return 854."
	expected := "invalid character '7' after object key:value pair"

	infodoc := makeInfoDoc(desc)
	_, err := NewWithDocs(infodoc, tx, data)
	if err == nil {
		t.Errorf("expected error, got nothing", err)
	} else {
		if err.Error() != expected {
			t.Errorf("expected error '%s' got '%v'", expected, err)
		}
	}
}

// test wrong input params
func TestWrongInputParams(t *testing.T) {

	desc := "Will multiply `e` by 7 and return `a * 7`."
	expected := "natspec.js error evaluating expression: Natspec evaluation failed, wrong input params"

	infodoc := makeInfoDoc(desc)
	ns, err := NewWithDocs(infodoc, tx, data)
	if err != nil {
		t.Errorf("New: error: %v", err)
	}

	notice, err := ns.Notice()

	if err == nil {
		t.Errorf("expected error, got nothing (notice: '%v')", notice)
	} else {
		if err.Error() != expected {
			t.Errorf("expected error '%s' got '%v' (notice: '%v')", expected, err, notice)
		}
	}

}

@@ -102,7 +102,7 @@ func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
	if !s.e.IsMining() {
		if err := s.e.StartMining(0, ""); err != nil {
		if err := s.e.StartMining(0); err != nil {
			return work, err
		}
	}
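
The boundary condition ("target") mentioned in the comment above is simply 2^256 divided by the difficulty. A minimal, standalone sketch of that computation follows; the difficulty value is an arbitrary example and is not taken from the code above:

```Go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// result[2] of GetWork is the boundary condition ("target"):
	// 2^256 / difficulty, hex encoded as a 32 byte (64 hex digit) value.
	difficulty := big.NewInt(131072) // arbitrary example difficulty
	max := new(big.Int).Lsh(big.NewInt(1), 256)
	target := new(big.Int).Div(max, difficulty)
	fmt.Printf("0x%064x\n", target)
}
```

A solution handed back via SubmitWork is valid when its proof-of-work hash is numerically below this target.
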
@@ -141,7 +141,7 @@ func (s *PrivateMinerAPI) Start(threads *rpc.HexNumber) (bool, error) {
		threads = rpc.NewHexNumber(runtime.NumCPU())
	}

	err := s.e.StartMining(threads.Int(), "")
	err := s.e.StartMining(threads.Int())
	if err == nil {
		return true, nil
	}

@@ -362,6 +362,17 @@ func (self *Ethereum) SetEtherbase(etherbase common.Address) {
	self.miner.SetEtherbase(etherbase)
}

func (s *Ethereum) StartMining(threads int) error {
	eb, err := s.Etherbase()
	if err != nil {
		err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
		glog.V(logger.Error).Infoln(err)
		return err
	}
	go s.miner.Start(eb, threads)
	return nil
}

func (s *Ethereum) StopMining()         { s.miner.Stop() }
func (s *Ethereum) IsMining() bool      { return s.miner.Mining() }
func (s *Ethereum) Miner() *miner.Miner { return s.miner }

@@ -1,54 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !opencl

package eth

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

const disabledInfo = "Set GO_OPENCL and re-build to enable."

func (s *Ethereum) StartMining(threads int, gpus string) error {
	eb, err := s.Etherbase()
	if err != nil {
		err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
		glog.V(logger.Error).Infoln(err)
		return err
	}

	if gpus != "" {
		return errors.New("GPU mining disabled. " + disabledInfo)
	}

	// CPU mining
	go s.miner.Start(eb, threads)
	return nil
}

func GPUBench(gpuid uint64) {
	fmt.Println("GPU mining disabled. " + disabledInfo)
}

func PrintOpenCLDevices() {
	fmt.Println("OpenCL disabled. " + disabledInfo)
}

@@ -1,102 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build opencl

package eth

import (
	"fmt"
	"math/big"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/miner"
)

func (s *Ethereum) StartMining(threads int, gpus string) error {
	eb, err := s.Etherbase()
	if err != nil {
		err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
		glog.V(logger.Error).Infoln(err)
		return err
	}

	// GPU mining
	if gpus != "" {
		var ids []int
		for _, s := range strings.Split(gpus, ",") {
			i, err := strconv.Atoi(s)
			if err != nil {
				return fmt.Errorf("Invalid GPU id(s): %v", err)
			}
			if i < 0 {
				return fmt.Errorf("Invalid GPU id: %v", i)
			}
			ids = append(ids, i)
		}

		// TODO: re-creating miner is a bit ugly
		s.miner = miner.New(s, s.chainConfig, s.EventMux(), ethash.NewCL(ids))
		go s.miner.Start(eb, len(ids))
		return nil
	}

	// CPU mining
	go s.miner.Start(eb, threads)
	return nil
}

func GPUBench(gpuid uint64) {
	e := ethash.NewCL([]int{int(gpuid)})

	var h common.Hash
	bogoHeader := &types.Header{
		ParentHash: h,
		Number:     big.NewInt(int64(42)),
		Difficulty: big.NewInt(int64(999999999999999)),
	}
	bogoBlock := types.NewBlock(bogoHeader, nil, nil, nil)

	err := ethash.InitCL(bogoBlock.NumberU64(), e)
	if err != nil {
		fmt.Println("OpenCL init error: ", err)
		return
	}

	stopChan := make(chan struct{})
	reportHashRate := func() {
		for {
			time.Sleep(3 * time.Second)
			fmt.Printf("hashes/s : %v\n", e.GetHashrate())
		}
	}
	fmt.Printf("Starting benchmark (%v seconds)\n", 60)
	go reportHashRate()
	go e.Search(bogoBlock, stopChan, 0)
	time.Sleep(60 * time.Second)
	fmt.Println("OK.")
}

func PrintOpenCLDevices() {
	ethash.PrintDevices()
}

@@ -55,15 +55,14 @@ func GOPATH() string {
	if len(path) == 0 {
		log.Fatal("GOPATH is not set")
	}
	// Ensure Godeps workspace is present in the path.
	godeps, _ := filepath.Abs(filepath.Join("Godeps", "_workspace"))
	// Ensure that our internal vendor folder is on GOPATH
	vendor, _ := filepath.Abs(filepath.Join("build", "_vendor"))
	for _, dir := range path {
		if dir == godeps {
		if dir == vendor {
			return strings.Join(path, string(filepath.ListSeparator))
		}
	}
	newpath := append(path[:1], godeps)
	newpath = append(newpath, path[1:]...)
	newpath := append(path[:1], append([]string{vendor}, path[1:]...)...)
	return strings.Join(newpath, string(filepath.ListSeparator))
}

@@ -0,0 +1,40 @@
# package
github.com/ethereum/go-ethereum

# import
github.com/cespare/cp 165db2f
github.com/davecgh/go-spew v1.0.0-3-g6d21280
github.com/ethereum/ethash v23.1-249-g214d4c0
github.com/fatih/color v1.1.0-4-gbf82308
github.com/gizak/termui d29684e
github.com/golang/snappy d9eb7a3
github.com/hashicorp/golang-lru 0a025b7
github.com/huin/goupnp 97f671e
github.com/jackpal/go-nat-pmp v1.0.1-4-g1fa385a
github.com/mattn/go-colorable v0.0.6-6-g6c903ff
github.com/mattn/go-isatty 66b8e73
github.com/mattn/go-runewidth v0.0.1-10-g737072b
github.com/mitchellh/go-wordwrap ad45545
github.com/nsf/termbox-go b6acae5
github.com/pborman/uuid v1.0-17-g3d4f2ba
github.com/peterh/liner 8975875
github.com/rcrowley/go-metrics ab2277b
github.com/rjeczalik/notify 7e20c15
github.com/robertkrimen/otto bf1c379
github.com/rs/cors v1.0
github.com/rs/xhandler v1.0-1-ged27b6f
github.com/syndtr/goleveldb 6b4daa5
golang.org/x/crypto ca7e7f1
golang.org/x/net b336a97
golang.org/x/sys c200b10
golang.org/x/text a8b3843
golang.org/x/tools 0db92ca
gopkg.in/check.v1 4f90aea
gopkg.in/fatih/set.v0 v0.1.0-3-g27c4092
gopkg.in/karalabe/cookiejar.v2 8dcd6a7
gopkg.in/natefinch/npipe.v2 c1b8fa8
gopkg.in/sourcemap.v1 v1.0.3
gopkg.in/urfave/cli.v1 v1.18.1

# exclude
-golang.org/x/net/context

@@ -0,0 +1,14 @@
language: go
go:
    - 1.5.4
    - 1.6.3
    - 1.7
install:
    - go get -v golang.org/x/tools/cmd/cover
script:
    - go test -v -tags=safe ./spew
    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
    - go get -v github.com/mattn/goveralls
    - export PATH=$PATH:$HOME/gopath/bin
    - goveralls -coverprofile=profile.cov -service=travis-ci

@@ -1,3 +1,5 @@
ISC License

Copyright (c) 2012-2013 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and distribute this software for any

@@ -0,0 +1,194 @@
go-spew
=======

[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
(https://coveralls.io/r/davecgh/go-spew?branch=master)

Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.

If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).

## Documentation

[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
(http://godoc.org/github.com/davecgh/go-spew/spew)

Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew

You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew

## Installation

```bash
$ go get -u github.com/davecgh/go-spew/spew
```

## Quick Start

Add this import line to the file you're working in:

```Go
import "github.com/davecgh/go-spew/spew"
```

To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:

```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```

Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):

```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```

## Debugging a Web Application Example

Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.

```Go
package main

import (
	"fmt"
	"html"
	"net/http"

	"github.com/davecgh/go-spew/spew"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
	fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}
```

## Sample Dump Output

```
(main.Foo) {
 unexportedField: (*main.Bar)(0xf84002e210)({
  flag: (main.Flag) flagTwo,
  data: (uintptr) <nil>
 }),
 ExportedField: (map[interface {}]interface {}) {
  (string) "one": (bool) true
 }
}
([]uint8) {
 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
 00000020  31 32                                             |12|
}
```
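
For reference, a minimal, self-contained program that produces dump output of roughly this shape; the type definitions below are assumptions chosen to mirror the sample (main.Foo, main.Bar, main.Flag) rather than part of the vendored package:

```Go
package main

import "github.com/davecgh/go-spew/spew"

// Flag's String method lets spew print the symbolic name (flagTwo)
// instead of the raw integer value.
type Flag int

const (
	flagOne Flag = iota
	flagTwo
)

func (f Flag) String() string {
	switch f {
	case flagOne:
		return "flagOne"
	case flagTwo:
		return "flagTwo"
	}
	return "unknown"
}

type Bar struct {
	flag Flag
	data uintptr
}

type Foo struct {
	unexportedField *Bar
	ExportedField   map[interface{}]interface{}
}

func main() {
	// Dump walks the values recursively, printing types, pointer addresses
	// and a hex/ASCII view for byte slices.
	spew.Dump(Foo{
		unexportedField: &Bar{flag: flagTwo},
		ExportedField:   map[interface{}]interface{}{"one": true},
	})
	spew.Dump([]byte{
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
		0x31, 0x32,
	})
}
```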

## Sample Formatter Output

Double pointer to a uint8:
```
  %v: <**>5
 %+v: <**>(0xf8400420d0->0xf8400420c8)5
 %#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```

Pointer to circular struct with a uint8 field and a pointer to itself:
```
  %v: <*>{1 <*><shown>}
 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
```

## Configuration Options

Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.

It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.

```
* Indent
    String to use for each indentation level for Dump functions.
    It is a single space by default. A popular alternative is "\t".

* MaxDepth
    Maximum number of levels to descend into nested data structures.
    There is no limit by default.

* DisableMethods
    Disables invocation of error and Stringer interface methods.
    Method invocation is enabled by default.

* DisablePointerMethods
    Disables invocation of error and Stringer interface methods on types
    which only accept pointer receivers from non-pointer variables. This option
    relies on access to the unsafe package, so it will not have any effect when
    running in environments without access to the unsafe package such as Google
    App Engine or with the "safe" build tag specified.
    Pointer method invocation is enabled by default.

* ContinueOnMethod
    Enables recursion into types after invoking error and Stringer interface
    methods. Recursion after method invocation is disabled by default.

* SortKeys
    Specifies map keys should be sorted before being printed. Use
    this to have a more deterministic, diffable output. Note that
    only native types (bool, int, uint, floats, uintptr and string)
    and types which implement error or Stringer interfaces are supported,
    with other types sorted according to the reflect.Value.String() output
    which guarantees display stability. Natural map order is used by
    default.

* SpewKeys
    SpewKeys specifies that, as a last resort attempt, map keys should be
    spewed to strings and sorted by those strings. This is only considered
    if SortKeys is true.

```
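
As a concrete illustration of the two configuration styles described above, mutating the shared spew.Config versus holding a private ConfigState, here is a minimal sketch (the chosen option values are arbitrary):

```Go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// Adjust the global state used by the top-level spew functions.
	spew.Config.Indent = "\t"
	spew.Config.SortKeys = true

	// Or keep an independent ConfigState so concurrent code paths can use
	// different settings without touching spew.Config.
	cs := spew.ConfigState{Indent: "  ", MaxDepth: 2, SortKeys: true}
	cs.Dump(map[string]int{"b": 2, "a": 1})
	cs.Fdump(os.Stderr, struct{ Name string }{Name: "example"})
}
```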

## Unsafe Package Dependency

This package relies on the unsafe package to perform some of the more advanced
features, however it also supports a "limited" mode which allows it to work in
environments where the unsafe package is not available. By default, it will
operate in this mode on Google App Engine and when compiled with GopherJS. The
"safe" build tag may also be specified to force the package to build without
using the unsafe package.

## License

Go-spew is licensed under the liberal ISC License.

@@ -0,0 +1,22 @@
#!/bin/sh

# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
#   go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.

# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
	echo >&2 "This script requires the gocov tool."
	echo >&2 "You may obtain it with the following command:"
	echo >&2 "go get github.com/axw/gocov/gocov"
	exit 1
fi

# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
	(cd spew && gocov test -tags testcgo | gocov report)
else
	(cd spew && gocov test | gocov report)
fi

@@ -13,9 +13,10 @@
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine and "-tags disableunsafe"
// is not added to the go build command line.
// +build !appengine,!disableunsafe
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe

package spew

@@ -13,9 +13,10 @@
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when either the code is running on Google App Engine or "-tags disableunsafe"
// is added to the go build command line.
// +build appengine disableunsafe
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe

package spew

@@ -64,7 +64,7 @@ type ConfigState struct {
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "disableunsafe" build tag specified.
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// ContinueOnMethod specifies whether or not recursion should continue once