vendor: remove unused dependencies (#19683)
* vendor: remove unused dependencies These were used by swarm code, which has now migrated to its own repository. * travis.yml: remove sudo requirement for test builders These needed sudo to run FUSE tests for swarm.
This commit is contained in:
parent
e83c3ccc47
commit
afb9e6513f
12
.travis.yml
12
.travis.yml
|
@ -5,35 +5,23 @@ matrix:
|
|||
include:
|
||||
- os: linux
|
||||
dist: xenial
|
||||
sudo: required
|
||||
go: 1.10.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
- os: linux
|
||||
dist: xenial
|
||||
sudo: required
|
||||
go: 1.11.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
# These are the latest Go versions.
|
||||
- os: linux
|
||||
dist: xenial
|
||||
sudo: required
|
||||
go: 1.12.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
|
|
|
@ -1,93 +0,0 @@
|
|||
Copyright (c) 2013-2015 Tommi Virtanen.
|
||||
Copyright (c) 2009, 2011, 2012 The Go Authors.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
|
||||
The following included software components have additional copyright
|
||||
notices and license terms that may differ from the above.
|
||||
|
||||
|
||||
File fuse.go:
|
||||
|
||||
// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c,
|
||||
// which carries this notice:
|
||||
//
|
||||
// The files in this directory are subject to the following license.
|
||||
//
|
||||
// The author of this software is Russ Cox.
|
||||
//
|
||||
// Copyright (c) 2006 Russ Cox
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose without fee is hereby granted, provided that this entire notice
|
||||
// is included in all copies of any software which is or includes a copy
|
||||
// or modification of this software and in all copies of the supporting
|
||||
// documentation for such software.
|
||||
//
|
||||
// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
|
||||
// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY
|
||||
// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
|
||||
// FITNESS FOR ANY PARTICULAR PURPOSE.
|
||||
|
||||
|
||||
File fuse_kernel.go:
|
||||
|
||||
// Derived from FUSE's fuse_kernel.h
|
||||
/*
|
||||
This file defines the kernel interface of FUSE
|
||||
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||
|
||||
|
||||
This -- and only this -- header file may also be distributed under
|
||||
the terms of the BSD Licence as follows:
|
||||
|
||||
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
*/
|
|
@ -1,23 +0,0 @@
|
|||
bazil.org/fuse -- Filesystems in Go
|
||||
===================================
|
||||
|
||||
`bazil.org/fuse` is a Go library for writing FUSE userspace
|
||||
filesystems.
|
||||
|
||||
It is a from-scratch implementation of the kernel-userspace
|
||||
communication protocol, and does not use the C library from the
|
||||
project called FUSE. `bazil.org/fuse` embraces Go fully for safety and
|
||||
ease of programming.
|
||||
|
||||
Here’s how to get going:
|
||||
|
||||
go get bazil.org/fuse
|
||||
|
||||
Website: http://bazil.org/fuse/
|
||||
|
||||
Github repository: https://github.com/bazil/fuse
|
||||
|
||||
API docs: http://godoc.org/bazil.org/fuse
|
||||
|
||||
Our thanks to Russ Cox for his fuse library, which this project is
|
||||
based on.
|
|
@ -1,35 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// buffer provides a mechanism for constructing a message from
|
||||
// multiple segments.
|
||||
type buffer []byte
|
||||
|
||||
// alloc allocates size bytes and returns a pointer to the new
|
||||
// segment.
|
||||
func (w *buffer) alloc(size uintptr) unsafe.Pointer {
|
||||
s := int(size)
|
||||
if len(*w)+s > cap(*w) {
|
||||
old := *w
|
||||
*w = make([]byte, len(*w), 2*cap(*w)+s)
|
||||
copy(*w, old)
|
||||
}
|
||||
l := len(*w)
|
||||
*w = (*w)[:l+s]
|
||||
return unsafe.Pointer(&(*w)[l])
|
||||
}
|
||||
|
||||
// reset clears out the contents of the buffer.
|
||||
func (w *buffer) reset() {
|
||||
for i := range (*w)[:cap(*w)] {
|
||||
(*w)[i] = 0
|
||||
}
|
||||
*w = (*w)[:0]
|
||||
}
|
||||
|
||||
func newBuffer(extra uintptr) buffer {
|
||||
const hdrSize = unsafe.Sizeof(outHeader{})
|
||||
buf := make(buffer, hdrSize, hdrSize+extra)
|
||||
return buf
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func stack() string {
|
||||
buf := make([]byte, 1024)
|
||||
return string(buf[:runtime.Stack(buf, false)])
|
||||
}
|
||||
|
||||
func nop(msg interface{}) {}
|
||||
|
||||
// Debug is called to output debug messages, including protocol
|
||||
// traces. The default behavior is to do nothing.
|
||||
//
|
||||
// The messages have human-friendly string representations and are
|
||||
// safe to marshal to JSON.
|
||||
//
|
||||
// Implementations must not retain msg.
|
||||
var Debug func(msg interface{}) = nop
|
|
@ -1,17 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
ENOATTR = Errno(syscall.ENOATTR)
|
||||
)
|
||||
|
||||
const (
|
||||
errNoXattr = ENOATTR
|
||||
)
|
||||
|
||||
func init() {
|
||||
errnoNames[errNoXattr] = "ENOATTR"
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import "syscall"
|
||||
|
||||
const (
|
||||
ENOATTR = Errno(syscall.ENOATTR)
|
||||
)
|
||||
|
||||
const (
|
||||
errNoXattr = ENOATTR
|
||||
)
|
||||
|
||||
func init() {
|
||||
errnoNames[errNoXattr] = "ENOATTR"
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
ENODATA = Errno(syscall.ENODATA)
|
||||
)
|
||||
|
||||
const (
|
||||
errNoXattr = ENODATA
|
||||
)
|
||||
|
||||
func init() {
|
||||
errnoNames[errNoXattr] = "ENODATA"
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
package fuse
|
||||
|
||||
// There is very little commonality in extended attribute errors
|
||||
// across platforms.
|
||||
//
|
||||
// getxattr return value for "extended attribute does not exist" is
|
||||
// ENOATTR on OS X, and ENODATA on Linux and apparently at least
|
||||
// NetBSD. There may be a #define ENOATTR on Linux too, but the value
|
||||
// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
|
||||
// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
|
||||
// ENODATA exists but is only used for STREAMs.
|
||||
//
|
||||
// Each platform will define it a errNoXattr constant, and this file
|
||||
// will enforce that it implements the right interfaces and hide the
|
||||
// implementation.
|
||||
//
|
||||
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html
|
||||
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html
|
||||
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
|
||||
// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2
|
||||
// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html
|
||||
|
||||
// ErrNoXattr is a platform-independent error value meaning the
|
||||
// extended attribute was not found. It can be used to respond to
|
||||
// GetxattrRequest and such.
|
||||
const ErrNoXattr = errNoXattr
|
||||
|
||||
var _ error = ErrNoXattr
|
||||
var _ Errno = ErrNoXattr
|
||||
var _ ErrorNumber = ErrNoXattr
|
File diff suppressed because it is too large
Load Diff
|
@ -1,99 +0,0 @@
|
|||
// FUSE directory tree, for servers that wish to use it with the service loop.
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
import (
|
||||
"bazil.org/fuse"
|
||||
)
|
||||
|
||||
// A Tree implements a basic read-only directory tree for FUSE.
|
||||
// The Nodes contained in it may still be writable.
|
||||
type Tree struct {
|
||||
tree
|
||||
}
|
||||
|
||||
func (t *Tree) Root() (Node, error) {
|
||||
return &t.tree, nil
|
||||
}
|
||||
|
||||
// Add adds the path to the tree, resolving to the given node.
|
||||
// If path or a prefix of path has already been added to the tree,
|
||||
// Add panics.
|
||||
//
|
||||
// Add is only safe to call before starting to serve requests.
|
||||
func (t *Tree) Add(path string, node Node) {
|
||||
path = pathpkg.Clean("/" + path)[1:]
|
||||
elems := strings.Split(path, "/")
|
||||
dir := Node(&t.tree)
|
||||
for i, elem := range elems {
|
||||
dt, ok := dir.(*tree)
|
||||
if !ok {
|
||||
panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path)
|
||||
}
|
||||
n := dt.lookup(elem)
|
||||
if n != nil {
|
||||
if i+1 == len(elems) {
|
||||
panic("fuse: Tree.Add for " + path + " conflicts with " + elem)
|
||||
}
|
||||
dir = n
|
||||
} else {
|
||||
if i+1 == len(elems) {
|
||||
dt.add(elem, node)
|
||||
} else {
|
||||
dir = &tree{}
|
||||
dt.add(elem, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type treeDir struct {
|
||||
name string
|
||||
node Node
|
||||
}
|
||||
|
||||
type tree struct {
|
||||
dir []treeDir
|
||||
}
|
||||
|
||||
func (t *tree) lookup(name string) Node {
|
||||
for _, d := range t.dir {
|
||||
if d.name == name {
|
||||
return d.node
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tree) add(name string, n Node) {
|
||||
t.dir = append(t.dir, treeDir{name, n})
|
||||
}
|
||||
|
||||
func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error {
|
||||
a.Mode = os.ModeDir | 0555
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tree) Lookup(ctx context.Context, name string) (Node, error) {
|
||||
n := t.lookup(name)
|
||||
if n != nil {
|
||||
return n, nil
|
||||
}
|
||||
return nil, fuse.ENOENT
|
||||
}
|
||||
|
||||
func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
|
||||
var out []fuse.Dirent
|
||||
for _, d := range t.dir {
|
||||
out = append(out, fuse.Dirent{Name: d.name})
|
||||
}
|
||||
return out, nil
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,9 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="GO_MODULE" version="4">
|
||||
<component name="NewModuleRootManager" inherit-compiler-output="true">
|
||||
<exclude-output />
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
|
@ -1,9 +0,0 @@
|
|||
package fuse
|
||||
|
||||
// Maximum file write size we are prepared to receive from the kernel.
|
||||
//
|
||||
// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will
|
||||
// forcibly close the /dev/fuse file descriptor on a Setxattr with a
|
||||
// 16MB value. See TestSetxattr16MB and
|
||||
// https://github.com/bazil/fuse/issues/42
|
||||
const maxWrite = 16 * 1024 * 1024
|
|
@ -1,6 +0,0 @@
|
|||
package fuse
|
||||
|
||||
// Maximum file write size we are prepared to receive from the kernel.
|
||||
//
|
||||
// This number is just a guess.
|
||||
const maxWrite = 128 * 1024
|
|
@ -1,774 +0,0 @@
|
|||
// See the file LICENSE for copyright and licensing information.
|
||||
|
||||
// Derived from FUSE's fuse_kernel.h, which carries this notice:
|
||||
/*
|
||||
This file defines the kernel interface of FUSE
|
||||
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||
|
||||
|
||||
This -- and only this -- header file may also be distributed under
|
||||
the terms of the BSD Licence as follows:
|
||||
|
||||
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package fuse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// The FUSE version implemented by the package.
|
||||
const (
|
||||
protoVersionMinMajor = 7
|
||||
protoVersionMinMinor = 8
|
||||
protoVersionMaxMajor = 7
|
||||
protoVersionMaxMinor = 12
|
||||
)
|
||||
|
||||
const (
|
||||
rootID = 1
|
||||
)
|
||||
|
||||
type kstatfs struct {
|
||||
Blocks uint64
|
||||
Bfree uint64
|
||||
Bavail uint64
|
||||
Files uint64
|
||||
Ffree uint64
|
||||
Bsize uint32
|
||||
Namelen uint32
|
||||
Frsize uint32
|
||||
_ uint32
|
||||
Spare [6]uint32
|
||||
}
|
||||
|
||||
type fileLock struct {
|
||||
Start uint64
|
||||
End uint64
|
||||
Type uint32
|
||||
Pid uint32
|
||||
}
|
||||
|
||||
// GetattrFlags are bit flags that can be seen in GetattrRequest.
|
||||
type GetattrFlags uint32
|
||||
|
||||
const (
|
||||
// Indicates the handle is valid.
|
||||
GetattrFh GetattrFlags = 1 << 0
|
||||
)
|
||||
|
||||
var getattrFlagsNames = []flagName{
|
||||
{uint32(GetattrFh), "GetattrFh"},
|
||||
}
|
||||
|
||||
func (fl GetattrFlags) String() string {
|
||||
return flagString(uint32(fl), getattrFlagsNames)
|
||||
}
|
||||
|
||||
// The SetattrValid are bit flags describing which fields in the SetattrRequest
|
||||
// are included in the change.
|
||||
type SetattrValid uint32
|
||||
|
||||
const (
|
||||
SetattrMode SetattrValid = 1 << 0
|
||||
SetattrUid SetattrValid = 1 << 1
|
||||
SetattrGid SetattrValid = 1 << 2
|
||||
SetattrSize SetattrValid = 1 << 3
|
||||
SetattrAtime SetattrValid = 1 << 4
|
||||
SetattrMtime SetattrValid = 1 << 5
|
||||
SetattrHandle SetattrValid = 1 << 6
|
||||
|
||||
// Linux only(?)
|
||||
SetattrAtimeNow SetattrValid = 1 << 7
|
||||
SetattrMtimeNow SetattrValid = 1 << 8
|
||||
SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html
|
||||
|
||||
// OS X only
|
||||
SetattrCrtime SetattrValid = 1 << 28
|
||||
SetattrChgtime SetattrValid = 1 << 29
|
||||
SetattrBkuptime SetattrValid = 1 << 30
|
||||
SetattrFlags SetattrValid = 1 << 31
|
||||
)
|
||||
|
||||
func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 }
|
||||
func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 }
|
||||
func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 }
|
||||
func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 }
|
||||
func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 }
|
||||
func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 }
|
||||
func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 }
|
||||
func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 }
|
||||
func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 }
|
||||
func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 }
|
||||
func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 }
|
||||
func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 }
|
||||
func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 }
|
||||
func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 }
|
||||
|
||||
func (fl SetattrValid) String() string {
|
||||
return flagString(uint32(fl), setattrValidNames)
|
||||
}
|
||||
|
||||
var setattrValidNames = []flagName{
|
||||
{uint32(SetattrMode), "SetattrMode"},
|
||||
{uint32(SetattrUid), "SetattrUid"},
|
||||
{uint32(SetattrGid), "SetattrGid"},
|
||||
{uint32(SetattrSize), "SetattrSize"},
|
||||
{uint32(SetattrAtime), "SetattrAtime"},
|
||||
{uint32(SetattrMtime), "SetattrMtime"},
|
||||
{uint32(SetattrHandle), "SetattrHandle"},
|
||||
{uint32(SetattrAtimeNow), "SetattrAtimeNow"},
|
||||
{uint32(SetattrMtimeNow), "SetattrMtimeNow"},
|
||||
{uint32(SetattrLockOwner), "SetattrLockOwner"},
|
||||
{uint32(SetattrCrtime), "SetattrCrtime"},
|
||||
{uint32(SetattrChgtime), "SetattrChgtime"},
|
||||
{uint32(SetattrBkuptime), "SetattrBkuptime"},
|
||||
{uint32(SetattrFlags), "SetattrFlags"},
|
||||
}
|
||||
|
||||
// Flags that can be seen in OpenRequest.Flags.
|
||||
const (
|
||||
// Access modes. These are not 1-bit flags, but alternatives where
|
||||
// only one can be chosen. See the IsReadOnly etc convenience
|
||||
// methods.
|
||||
OpenReadOnly OpenFlags = syscall.O_RDONLY
|
||||
OpenWriteOnly OpenFlags = syscall.O_WRONLY
|
||||
OpenReadWrite OpenFlags = syscall.O_RDWR
|
||||
|
||||
// File was opened in append-only mode, all writes will go to end
|
||||
// of file. OS X does not provide this information.
|
||||
OpenAppend OpenFlags = syscall.O_APPEND
|
||||
OpenCreate OpenFlags = syscall.O_CREAT
|
||||
OpenDirectory OpenFlags = syscall.O_DIRECTORY
|
||||
OpenExclusive OpenFlags = syscall.O_EXCL
|
||||
OpenNonblock OpenFlags = syscall.O_NONBLOCK
|
||||
OpenSync OpenFlags = syscall.O_SYNC
|
||||
OpenTruncate OpenFlags = syscall.O_TRUNC
|
||||
)
|
||||
|
||||
// OpenAccessModeMask is a bitmask that separates the access mode
|
||||
// from the other flags in OpenFlags.
|
||||
const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE
|
||||
|
||||
// OpenFlags are the O_FOO flags passed to open/create/etc calls. For
|
||||
// example, os.O_WRONLY | os.O_APPEND.
|
||||
type OpenFlags uint32
|
||||
|
||||
func (fl OpenFlags) String() string {
|
||||
// O_RDONLY, O_RWONLY, O_RDWR are not flags
|
||||
s := accModeName(fl & OpenAccessModeMask)
|
||||
flags := uint32(fl &^ OpenAccessModeMask)
|
||||
if flags != 0 {
|
||||
s = s + "+" + flagString(flags, openFlagNames)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Return true if OpenReadOnly is set.
|
||||
func (fl OpenFlags) IsReadOnly() bool {
|
||||
return fl&OpenAccessModeMask == OpenReadOnly
|
||||
}
|
||||
|
||||
// Return true if OpenWriteOnly is set.
|
||||
func (fl OpenFlags) IsWriteOnly() bool {
|
||||
return fl&OpenAccessModeMask == OpenWriteOnly
|
||||
}
|
||||
|
||||
// Return true if OpenReadWrite is set.
|
||||
func (fl OpenFlags) IsReadWrite() bool {
|
||||
return fl&OpenAccessModeMask == OpenReadWrite
|
||||
}
|
||||
|
||||
func accModeName(flags OpenFlags) string {
|
||||
switch flags {
|
||||
case OpenReadOnly:
|
||||
return "OpenReadOnly"
|
||||
case OpenWriteOnly:
|
||||
return "OpenWriteOnly"
|
||||
case OpenReadWrite:
|
||||
return "OpenReadWrite"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
var openFlagNames = []flagName{
|
||||
{uint32(OpenAppend), "OpenAppend"},
|
||||
{uint32(OpenCreate), "OpenCreate"},
|
||||
{uint32(OpenDirectory), "OpenDirectory"},
|
||||
{uint32(OpenExclusive), "OpenExclusive"},
|
||||
{uint32(OpenNonblock), "OpenNonblock"},
|
||||
{uint32(OpenSync), "OpenSync"},
|
||||
{uint32(OpenTruncate), "OpenTruncate"},
|
||||
}
|
||||
|
||||
// The OpenResponseFlags are returned in the OpenResponse.
|
||||
type OpenResponseFlags uint32
|
||||
|
||||
const (
|
||||
OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file
|
||||
OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open
|
||||
OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X)
|
||||
|
||||
OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X
|
||||
OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X
|
||||
)
|
||||
|
||||
func (fl OpenResponseFlags) String() string {
|
||||
return flagString(uint32(fl), openResponseFlagNames)
|
||||
}
|
||||
|
||||
var openResponseFlagNames = []flagName{
|
||||
{uint32(OpenDirectIO), "OpenDirectIO"},
|
||||
{uint32(OpenKeepCache), "OpenKeepCache"},
|
||||
{uint32(OpenNonSeekable), "OpenNonSeekable"},
|
||||
{uint32(OpenPurgeAttr), "OpenPurgeAttr"},
|
||||
{uint32(OpenPurgeUBC), "OpenPurgeUBC"},
|
||||
}
|
||||
|
||||
// The InitFlags are used in the Init exchange.
|
||||
type InitFlags uint32
|
||||
|
||||
const (
|
||||
InitAsyncRead InitFlags = 1 << 0
|
||||
InitPosixLocks InitFlags = 1 << 1
|
||||
InitFileOps InitFlags = 1 << 2
|
||||
InitAtomicTrunc InitFlags = 1 << 3
|
||||
InitExportSupport InitFlags = 1 << 4
|
||||
InitBigWrites InitFlags = 1 << 5
|
||||
// Do not mask file access modes with umask. Not supported on OS X.
|
||||
InitDontMask InitFlags = 1 << 6
|
||||
InitSpliceWrite InitFlags = 1 << 7
|
||||
InitSpliceMove InitFlags = 1 << 8
|
||||
InitSpliceRead InitFlags = 1 << 9
|
||||
InitFlockLocks InitFlags = 1 << 10
|
||||
InitHasIoctlDir InitFlags = 1 << 11
|
||||
InitAutoInvalData InitFlags = 1 << 12
|
||||
InitDoReaddirplus InitFlags = 1 << 13
|
||||
InitReaddirplusAuto InitFlags = 1 << 14
|
||||
InitAsyncDIO InitFlags = 1 << 15
|
||||
InitWritebackCache InitFlags = 1 << 16
|
||||
InitNoOpenSupport InitFlags = 1 << 17
|
||||
|
||||
InitCaseSensitive InitFlags = 1 << 29 // OS X only
|
||||
InitVolRename InitFlags = 1 << 30 // OS X only
|
||||
InitXtimes InitFlags = 1 << 31 // OS X only
|
||||
)
|
||||
|
||||
type flagName struct {
|
||||
bit uint32
|
||||
name string
|
||||
}
|
||||
|
||||
var initFlagNames = []flagName{
|
||||
{uint32(InitAsyncRead), "InitAsyncRead"},
|
||||
{uint32(InitPosixLocks), "InitPosixLocks"},
|
||||
{uint32(InitFileOps), "InitFileOps"},
|
||||
{uint32(InitAtomicTrunc), "InitAtomicTrunc"},
|
||||
{uint32(InitExportSupport), "InitExportSupport"},
|
||||
{uint32(InitBigWrites), "InitBigWrites"},
|
||||
{uint32(InitDontMask), "InitDontMask"},
|
||||
{uint32(InitSpliceWrite), "InitSpliceWrite"},
|
||||
{uint32(InitSpliceMove), "InitSpliceMove"},
|
||||
{uint32(InitSpliceRead), "InitSpliceRead"},
|
||||
{uint32(InitFlockLocks), "InitFlockLocks"},
|
||||
{uint32(InitHasIoctlDir), "InitHasIoctlDir"},
|
||||
{uint32(InitAutoInvalData), "InitAutoInvalData"},
|
||||
{uint32(InitDoReaddirplus), "InitDoReaddirplus"},
|
||||
{uint32(InitReaddirplusAuto), "InitReaddirplusAuto"},
|
||||
{uint32(InitAsyncDIO), "InitAsyncDIO"},
|
||||
{uint32(InitWritebackCache), "InitWritebackCache"},
|
||||
{uint32(InitNoOpenSupport), "InitNoOpenSupport"},
|
||||
|
||||
{uint32(InitCaseSensitive), "InitCaseSensitive"},
|
||||
{uint32(InitVolRename), "InitVolRename"},
|
||||
{uint32(InitXtimes), "InitXtimes"},
|
||||
}
|
||||
|
||||
func (fl InitFlags) String() string {
|
||||
return flagString(uint32(fl), initFlagNames)
|
||||
}
|
||||
|
||||
func flagString(f uint32, names []flagName) string {
|
||||
var s string
|
||||
|
||||
if f == 0 {
|
||||
return "0"
|
||||
}
|
||||
|
||||
for _, n := range names {
|
||||
if f&n.bit != 0 {
|
||||
s += "+" + n.name
|
||||
f &^= n.bit
|
||||
}
|
||||
}
|
||||
if f != 0 {
|
||||
s += fmt.Sprintf("%+#x", f)
|
||||
}
|
||||
return s[1:]
|
||||
}
|
||||
|
||||
// The ReleaseFlags are used in the Release exchange.
|
||||
type ReleaseFlags uint32
|
||||
|
||||
const (
|
||||
ReleaseFlush ReleaseFlags = 1 << 0
|
||||
)
|
||||
|
||||
func (fl ReleaseFlags) String() string {
|
||||
return flagString(uint32(fl), releaseFlagNames)
|
||||
}
|
||||
|
||||
var releaseFlagNames = []flagName{
|
||||
{uint32(ReleaseFlush), "ReleaseFlush"},
|
||||
}
|
||||
|
||||
// Opcodes
|
||||
const (
|
||||
opLookup = 1
|
||||
opForget = 2 // no reply
|
||||
opGetattr = 3
|
||||
opSetattr = 4
|
||||
opReadlink = 5
|
||||
opSymlink = 6
|
||||
opMknod = 8
|
||||
opMkdir = 9
|
||||
opUnlink = 10
|
||||
opRmdir = 11
|
||||
opRename = 12
|
||||
opLink = 13
|
||||
opOpen = 14
|
||||
opRead = 15
|
||||
opWrite = 16
|
||||
opStatfs = 17
|
||||
opRelease = 18
|
||||
opFsync = 20
|
||||
opSetxattr = 21
|
||||
opGetxattr = 22
|
||||
opListxattr = 23
|
||||
opRemovexattr = 24
|
||||
opFlush = 25
|
||||
opInit = 26
|
||||
opOpendir = 27
|
||||
opReaddir = 28
|
||||
opReleasedir = 29
|
||||
opFsyncdir = 30
|
||||
opGetlk = 31
|
||||
opSetlk = 32
|
||||
opSetlkw = 33
|
||||
opAccess = 34
|
||||
opCreate = 35
|
||||
opInterrupt = 36
|
||||
opBmap = 37
|
||||
opDestroy = 38
|
||||
opIoctl = 39 // Linux?
|
||||
opPoll = 40 // Linux?
|
||||
|
||||
// OS X
|
||||
opSetvolname = 61
|
||||
opGetxtimes = 62
|
||||
opExchange = 63
|
||||
)
|
||||
|
||||
type entryOut struct {
|
||||
Nodeid uint64 // Inode ID
|
||||
Generation uint64 // Inode generation
|
||||
EntryValid uint64 // Cache timeout for the name
|
||||
AttrValid uint64 // Cache timeout for the attributes
|
||||
EntryValidNsec uint32
|
||||
AttrValidNsec uint32
|
||||
Attr attr
|
||||
}
|
||||
|
||||
func entryOutSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize)
|
||||
default:
|
||||
return unsafe.Sizeof(entryOut{})
|
||||
}
|
||||
}
|
||||
|
||||
type forgetIn struct {
|
||||
Nlookup uint64
|
||||
}
|
||||
|
||||
type getattrIn struct {
|
||||
GetattrFlags uint32
|
||||
_ uint32
|
||||
Fh uint64
|
||||
}
|
||||
|
||||
type attrOut struct {
|
||||
AttrValid uint64 // Cache timeout for the attributes
|
||||
AttrValidNsec uint32
|
||||
_ uint32
|
||||
Attr attr
|
||||
}
|
||||
|
||||
func attrOutSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize)
|
||||
default:
|
||||
return unsafe.Sizeof(attrOut{})
|
||||
}
|
||||
}
|
||||
|
||||
// OS X
|
||||
type getxtimesOut struct {
|
||||
Bkuptime uint64
|
||||
Crtime uint64
|
||||
BkuptimeNsec uint32
|
||||
CrtimeNsec uint32
|
||||
}
|
||||
|
||||
type mknodIn struct {
|
||||
Mode uint32
|
||||
Rdev uint32
|
||||
Umask uint32
|
||||
_ uint32
|
||||
// "filename\x00" follows.
|
||||
}
|
||||
|
||||
func mknodInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 12}):
|
||||
return unsafe.Offsetof(mknodIn{}.Umask)
|
||||
default:
|
||||
return unsafe.Sizeof(mknodIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type mkdirIn struct {
|
||||
Mode uint32
|
||||
Umask uint32
|
||||
// filename follows
|
||||
}
|
||||
|
||||
func mkdirInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 12}):
|
||||
return unsafe.Offsetof(mkdirIn{}.Umask) + 4
|
||||
default:
|
||||
return unsafe.Sizeof(mkdirIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type renameIn struct {
|
||||
Newdir uint64
|
||||
// "oldname\x00newname\x00" follows
|
||||
}
|
||||
|
||||
// OS X
|
||||
type exchangeIn struct {
|
||||
Olddir uint64
|
||||
Newdir uint64
|
||||
Options uint64
|
||||
// "oldname\x00newname\x00" follows
|
||||
}
|
||||
|
||||
type linkIn struct {
|
||||
Oldnodeid uint64
|
||||
}
|
||||
|
||||
type setattrInCommon struct {
|
||||
Valid uint32
|
||||
_ uint32
|
||||
Fh uint64
|
||||
Size uint64
|
||||
LockOwner uint64 // unused on OS X?
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Unused2 uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
Unused3 uint32
|
||||
Mode uint32
|
||||
Unused4 uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Unused5 uint32
|
||||
}
|
||||
|
||||
type openIn struct {
|
||||
Flags uint32
|
||||
Unused uint32
|
||||
}
|
||||
|
||||
type openOut struct {
|
||||
Fh uint64
|
||||
OpenFlags uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type createIn struct {
|
||||
Flags uint32
|
||||
Mode uint32
|
||||
Umask uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func createInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 12}):
|
||||
return unsafe.Offsetof(createIn{}.Umask)
|
||||
default:
|
||||
return unsafe.Sizeof(createIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type releaseIn struct {
|
||||
Fh uint64
|
||||
Flags uint32
|
||||
ReleaseFlags uint32
|
||||
LockOwner uint32
|
||||
}
|
||||
|
||||
type flushIn struct {
|
||||
Fh uint64
|
||||
FlushFlags uint32
|
||||
_ uint32
|
||||
LockOwner uint64
|
||||
}
|
||||
|
||||
type readIn struct {
|
||||
Fh uint64
|
||||
Offset uint64
|
||||
Size uint32
|
||||
ReadFlags uint32
|
||||
LockOwner uint64
|
||||
Flags uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func readInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(readIn{}.ReadFlags) + 4
|
||||
default:
|
||||
return unsafe.Sizeof(readIn{})
|
||||
}
|
||||
}
|
||||
|
||||
// The ReadFlags are passed in ReadRequest.
|
||||
type ReadFlags uint32
|
||||
|
||||
const (
|
||||
// LockOwner field is valid.
|
||||
ReadLockOwner ReadFlags = 1 << 1
|
||||
)
|
||||
|
||||
var readFlagNames = []flagName{
|
||||
{uint32(ReadLockOwner), "ReadLockOwner"},
|
||||
}
|
||||
|
||||
func (fl ReadFlags) String() string {
|
||||
return flagString(uint32(fl), readFlagNames)
|
||||
}
|
||||
|
||||
type writeIn struct {
|
||||
Fh uint64
|
||||
Offset uint64
|
||||
Size uint32
|
||||
WriteFlags uint32
|
||||
LockOwner uint64
|
||||
Flags uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func writeInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(writeIn{}.LockOwner)
|
||||
default:
|
||||
return unsafe.Sizeof(writeIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type writeOut struct {
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
// The WriteFlags are passed in WriteRequest.
|
||||
type WriteFlags uint32
|
||||
|
||||
const (
|
||||
WriteCache WriteFlags = 1 << 0
|
||||
// LockOwner field is valid.
|
||||
WriteLockOwner WriteFlags = 1 << 1
|
||||
)
|
||||
|
||||
var writeFlagNames = []flagName{
|
||||
{uint32(WriteCache), "WriteCache"},
|
||||
{uint32(WriteLockOwner), "WriteLockOwner"},
|
||||
}
|
||||
|
||||
func (fl WriteFlags) String() string {
|
||||
return flagString(uint32(fl), writeFlagNames)
|
||||
}
|
||||
|
||||
const compatStatfsSize = 48
|
||||
|
||||
type statfsOut struct {
|
||||
St kstatfs
|
||||
}
|
||||
|
||||
type fsyncIn struct {
|
||||
Fh uint64
|
||||
FsyncFlags uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type setxattrInCommon struct {
|
||||
Size uint32
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
func (setxattrInCommon) position() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type getxattrInCommon struct {
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func (getxattrInCommon) position() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type getxattrOut struct {
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type lkIn struct {
|
||||
Fh uint64
|
||||
Owner uint64
|
||||
Lk fileLock
|
||||
LkFlags uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func lkInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(lkIn{}.LkFlags)
|
||||
default:
|
||||
return unsafe.Sizeof(lkIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type lkOut struct {
|
||||
Lk fileLock
|
||||
}
|
||||
|
||||
type accessIn struct {
|
||||
Mask uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type initIn struct {
|
||||
Major uint32
|
||||
Minor uint32
|
||||
MaxReadahead uint32
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
const initInSize = int(unsafe.Sizeof(initIn{}))
|
||||
|
||||
type initOut struct {
|
||||
Major uint32
|
||||
Minor uint32
|
||||
MaxReadahead uint32
|
||||
Flags uint32
|
||||
Unused uint32
|
||||
MaxWrite uint32
|
||||
}
|
||||
|
||||
type interruptIn struct {
|
||||
Unique uint64
|
||||
}
|
||||
|
||||
type bmapIn struct {
|
||||
Block uint64
|
||||
BlockSize uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type bmapOut struct {
|
||||
Block uint64
|
||||
}
|
||||
|
||||
type inHeader struct {
|
||||
Len uint32
|
||||
Opcode uint32
|
||||
Unique uint64
|
||||
Nodeid uint64
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Pid uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
const inHeaderSize = int(unsafe.Sizeof(inHeader{}))
|
||||
|
||||
type outHeader struct {
|
||||
Len uint32
|
||||
Error int32
|
||||
Unique uint64
|
||||
}
|
||||
|
||||
type dirent struct {
|
||||
Ino uint64
|
||||
Off uint64
|
||||
Namelen uint32
|
||||
Type uint32
|
||||
Name [0]byte
|
||||
}
|
||||
|
||||
const direntSize = 8 + 8 + 4 + 4
|
||||
|
||||
const (
|
||||
notifyCodePoll int32 = 1
|
||||
notifyCodeInvalInode int32 = 2
|
||||
notifyCodeInvalEntry int32 = 3
|
||||
)
|
||||
|
||||
type notifyInvalInodeOut struct {
|
||||
Ino uint64
|
||||
Off int64
|
||||
Len int64
|
||||
}
|
||||
|
||||
type notifyInvalEntryOut struct {
|
||||
Parent uint64
|
||||
Namelen uint32
|
||||
_ uint32
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
Crtime_ uint64 // OS X only
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
CrtimeNsec uint32 // OS X only
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Flags_ uint32 // OS X only; see chflags(2)
|
||||
Blksize uint32
|
||||
padding uint32
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
a.Crtime_, a.CrtimeNsec = s, ns
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
a.Flags_ = f
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
|
||||
// OS X only
|
||||
Bkuptime_ uint64
|
||||
Chgtime_ uint64
|
||||
Crtime uint64
|
||||
BkuptimeNsec uint32
|
||||
ChgtimeNsec uint32
|
||||
CrtimeNsec uint32
|
||||
Flags_ uint32 // see chflags(2)
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec))
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec))
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return in.Flags_
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
|
||||
// OS X only
|
||||
Position uint32
|
||||
Padding uint32
|
||||
}
|
||||
|
||||
func (g *getxattrIn) position() uint32 {
|
||||
return g.Position
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
|
||||
// OS X only
|
||||
Position uint32
|
||||
Padding uint32
|
||||
}
|
||||
|
||||
func (s *setxattrIn) position() uint32 {
|
||||
return s.Position
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import "time"
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Blksize uint32
|
||||
padding uint32
|
||||
}
|
||||
|
||||
func (a *attr) Crtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
// ignored on freebsd
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
// ignored on freebsd
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import "time"
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Blksize uint32
|
||||
padding uint32
|
||||
}
|
||||
|
||||
func (a *attr) Crtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
// Ignored on Linux.
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
// Ignored on Linux.
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
// on amd64, the 32-bit O_LARGEFILE flag is always seen;
|
||||
// on i386, the flag probably depends on the app
|
||||
// requesting, but in any case should be utterly
|
||||
// uninteresting to us here; our kernel protocol messages
|
||||
// are not directly related to the client app's kernel
|
||||
// API/ABI
|
||||
flags &^= 0x8000
|
||||
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
package fuse
|
|
@ -1,7 +0,0 @@
|
|||
package fuse
|
||||
|
||||
// Maximum file write size we are prepared to receive from the kernel.
|
||||
//
|
||||
// Linux 4.2.0 has been observed to cap this value at 128kB
|
||||
// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages).
|
||||
const maxWrite = 128 * 1024
|
|
@ -1,20 +0,0 @@
|
|||
package fuseutil // import "bazil.org/fuse/fuseutil"
|
||||
|
||||
import (
|
||||
"bazil.org/fuse"
|
||||
)
|
||||
|
||||
// HandleRead handles a read request assuming that data is the entire file content.
|
||||
// It adjusts the amount returned in resp according to req.Offset and req.Size.
|
||||
func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) {
|
||||
if req.Offset >= int64(len(data)) {
|
||||
data = nil
|
||||
} else {
|
||||
data = data[req.Offset:]
|
||||
}
|
||||
if len(data) > req.Size {
|
||||
data = data[:req.Size]
|
||||
}
|
||||
n := copy(resp.Data[:req.Size], data)
|
||||
resp.Data = resp.Data[:n]
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrOSXFUSENotFound is returned from Mount when the OSXFUSE
|
||||
// installation is not detected.
|
||||
//
|
||||
// Only happens on OS X. Make sure OSXFUSE is installed, or see
|
||||
// OSXFUSELocations for customization.
|
||||
ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE")
|
||||
)
|
||||
|
||||
func neverIgnoreLine(line string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) {
|
||||
defer wg.Done()
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if ignore(line) {
|
||||
continue
|
||||
}
|
||||
log.Printf("%s: %s", prefix, line)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Printf("%s, error reading: %v", prefix, err)
|
||||
}
|
||||
}
|
|
@ -1,208 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoAvail = errors.New("no available fuse devices")
|
||||
errNotLoaded = errors.New("osxfuse is not loaded")
|
||||
)
|
||||
|
||||
func loadOSXFUSE(bin string) error {
|
||||
cmd := exec.Command(bin)
|
||||
cmd.Dir = "/"
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
return err
|
||||
}
|
||||
|
||||
func openOSXFUSEDev(devPrefix string) (*os.File, error) {
|
||||
var f *os.File
|
||||
var err error
|
||||
for i := uint64(0); ; i++ {
|
||||
path := devPrefix + strconv.FormatUint(i, 10)
|
||||
f, err = os.OpenFile(path, os.O_RDWR, 0000)
|
||||
if os.IsNotExist(err) {
|
||||
if i == 0 {
|
||||
// not even the first device was found -> fuse is not loaded
|
||||
return nil, errNotLoaded
|
||||
}
|
||||
|
||||
// we've run out of kernel-provided devices
|
||||
return nil, errNoAvail
|
||||
}
|
||||
|
||||
if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY {
|
||||
// try the next one
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
}
|
||||
|
||||
func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) {
|
||||
var noMountpointPrefix = helperName + `: `
|
||||
const noMountpointSuffix = `: No such file or directory`
|
||||
return func(line string) (ignore bool) {
|
||||
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||
// re-extract it from the error message in case some layer
|
||||
// changed the path
|
||||
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||
err := &MountpointDoesNotExistError{
|
||||
Path: mountpoint,
|
||||
}
|
||||
select {
|
||||
case errCh <- err:
|
||||
return true
|
||||
default:
|
||||
// not the first error; fall back to logging it
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isBoringMountOSXFUSEError returns whether the Wait error is
|
||||
// uninteresting; exit status 64 is.
|
||||
func isBoringMountOSXFUSEError(err error) bool {
|
||||
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error {
|
||||
for k, v := range conf.options {
|
||||
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||
// Silly limitation but the mount helper does not
|
||||
// understand any escaping. See TestMountOptionCommaError.
|
||||
return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v)
|
||||
}
|
||||
}
|
||||
cmd := exec.Command(
|
||||
bin,
|
||||
"-o", conf.getOptions(),
|
||||
// Tell osxfuse-kext how large our buffer is. It must split
|
||||
// writes larger than this into multiple writes.
|
||||
//
|
||||
// OSXFUSE seems to ignore InitResponse.MaxWrite, and uses
|
||||
// this instead.
|
||||
"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
|
||||
// refers to fd passed in cmd.ExtraFiles
|
||||
"3",
|
||||
dir,
|
||||
)
|
||||
cmd.ExtraFiles = []*os.File{f}
|
||||
cmd.Env = os.Environ()
|
||||
// OSXFUSE <3.3.0
|
||||
cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
|
||||
// OSXFUSE >=3.3.0
|
||||
cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")
|
||||
|
||||
daemon := os.Args[0]
|
||||
if daemonVar != "" {
|
||||
cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("mount_osxfusefs: %v", err)
|
||||
}
|
||||
helperErrCh := make(chan error, 1)
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||
helperName := path.Base(bin)
|
||||
go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
|
||||
wg.Wait()
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// see if we have a better error to report
|
||||
select {
|
||||
case helperErr := <-helperErrCh:
|
||||
// log the Wait error if it's not what we expected
|
||||
if !isBoringMountOSXFUSEError(err) {
|
||||
log.Printf("mount helper failed: %v", err)
|
||||
}
|
||||
// and now return what we grabbed from stderr as the real
|
||||
// error
|
||||
*errp = helperErr
|
||||
close(ready)
|
||||
return
|
||||
default:
|
||||
// nope, fall back to generic message
|
||||
}
|
||||
|
||||
*errp = fmt.Errorf("mount_osxfusefs: %v", err)
|
||||
close(ready)
|
||||
return
|
||||
}
|
||||
|
||||
*errp = nil
|
||||
close(ready)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||
locations := conf.osxfuseLocations
|
||||
if locations == nil {
|
||||
locations = []OSXFUSEPaths{
|
||||
OSXFUSELocationV3,
|
||||
OSXFUSELocationV2,
|
||||
}
|
||||
}
|
||||
for _, loc := range locations {
|
||||
if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
|
||||
// try the other locations
|
||||
continue
|
||||
}
|
||||
|
||||
f, err := openOSXFUSEDev(loc.DevicePrefix)
|
||||
if err == errNotLoaded {
|
||||
err = loadOSXFUSE(loc.Load)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// try again
|
||||
f, err = openOSXFUSEDev(loc.DevicePrefix)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
return nil, ErrOSXFUSENotFound
|
||||
}
|
|
@ -1,111 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) {
|
||||
return func(line string) (ignore bool) {
|
||||
const (
|
||||
noMountpointPrefix = `mount_fusefs: `
|
||||
noMountpointSuffix = `: No such file or directory`
|
||||
)
|
||||
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||
// re-extract it from the error message in case some layer
|
||||
// changed the path
|
||||
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||
err := &MountpointDoesNotExistError{
|
||||
Path: mountpoint,
|
||||
}
|
||||
select {
|
||||
case errCh <- err:
|
||||
return true
|
||||
default:
|
||||
// not the first error; fall back to logging it
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isBoringMountFusefsError returns whether the Wait error is
|
||||
// uninteresting; exit status 1 is.
|
||||
func isBoringMountFusefsError(err error) bool {
|
||||
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||
for k, v := range conf.options {
|
||||
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||
// Silly limitation but the mount helper does not
|
||||
// understand any escaping. See TestMountOptionCommaError.
|
||||
return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
|
||||
if err != nil {
|
||||
*errp = err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := exec.Command(
|
||||
"/sbin/mount_fusefs",
|
||||
"--safe",
|
||||
"-o", conf.getOptions(),
|
||||
"3",
|
||||
dir,
|
||||
)
|
||||
cmd.ExtraFiles = []*os.File{f}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("mount_fusefs: %v", err)
|
||||
}
|
||||
helperErrCh := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||
go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr)
|
||||
wg.Wait()
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// see if we have a better error to report
|
||||
select {
|
||||
case helperErr := <-helperErrCh:
|
||||
// log the Wait error if it's not what we expected
|
||||
if !isBoringMountFusefsError(err) {
|
||||
log.Printf("mount helper failed: %v", err)
|
||||
}
|
||||
// and now return what we grabbed from stderr as the real
|
||||
// error
|
||||
return nil, helperErr
|
||||
default:
|
||||
// nope, fall back to generic message
|
||||
}
|
||||
return nil, fmt.Errorf("mount_fusefs: %v", err)
|
||||
}
|
||||
|
||||
close(ready)
|
||||
return f, nil
|
||||
}
|
|
@ -1,150 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
|
||||
return func(line string) (ignore bool) {
|
||||
if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
|
||||
// Silence this particular message, it occurs way too
|
||||
// commonly and isn't very relevant to whether the mount
|
||||
// succeeds or not.
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
noMountpointPrefix = `fusermount: failed to access mountpoint `
|
||||
noMountpointSuffix = `: No such file or directory`
|
||||
)
|
||||
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||
// re-extract it from the error message in case some layer
|
||||
// changed the path
|
||||
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||
err := &MountpointDoesNotExistError{
|
||||
Path: mountpoint,
|
||||
}
|
||||
select {
|
||||
case errCh <- err:
|
||||
return true
|
||||
default:
|
||||
// not the first error; fall back to logging it
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isBoringFusermountError returns whether the Wait error is
|
||||
// uninteresting; exit status 1 is.
|
||||
func isBoringFusermountError(err error) bool {
|
||||
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
|
||||
// linux mount is never delayed
|
||||
close(ready)
|
||||
|
||||
fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("socketpair error: %v", err)
|
||||
}
|
||||
|
||||
writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
|
||||
defer writeFile.Close()
|
||||
|
||||
readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
|
||||
defer readFile.Close()
|
||||
|
||||
cmd := exec.Command(
|
||||
"fusermount",
|
||||
"-o", conf.getOptions(),
|
||||
"--",
|
||||
dir,
|
||||
)
|
||||
cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")
|
||||
|
||||
cmd.ExtraFiles = []*os.File{writeFile}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("fusermount: %v", err)
|
||||
}
|
||||
helperErrCh := make(chan error, 1)
|
||||
wg.Add(2)
|
||||
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||
go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
|
||||
wg.Wait()
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// see if we have a better error to report
|
||||
select {
|
||||
case helperErr := <-helperErrCh:
|
||||
// log the Wait error if it's not what we expected
|
||||
if !isBoringFusermountError(err) {
|
||||
log.Printf("mount helper failed: %v", err)
|
||||
}
|
||||
// and now return what we grabbed from stderr as the real
|
||||
// error
|
||||
return nil, helperErr
|
||||
default:
|
||||
// nope, fall back to generic message
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("fusermount: %v", err)
|
||||
}
|
||||
|
||||
c, err := net.FileConn(readFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
uc, ok := c.(*net.UnixConn)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
|
||||
}
|
||||
|
||||
buf := make([]byte, 32) // expect 1 byte
|
||||
oob := make([]byte, 32) // expect 24 bytes
|
||||
_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
if err != nil {
return nil, fmt.Errorf("reading fd from fusermount: %v", err)
}
scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
|
||||
}
|
||||
if len(scms) != 1 {
|
||||
return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
|
||||
}
|
||||
scm := scms[0]
|
||||
gotFds, err := syscall.ParseUnixRights(&scm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
|
||||
}
|
||||
if len(gotFds) != 1 {
|
||||
return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
|
||||
}
|
||||
f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
|
||||
return f, nil
|
||||
}
|
|
@ -1,310 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func dummyOption(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountConfig holds the configuration for a mount operation.
|
||||
// Use it by passing MountOption values to Mount.
|
||||
type mountConfig struct {
|
||||
options map[string]string
|
||||
maxReadahead uint32
|
||||
initFlags InitFlags
|
||||
osxfuseLocations []OSXFUSEPaths
|
||||
}
|
||||
|
||||
func escapeComma(s string) string {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
s = strings.Replace(s, `,`, `\,`, -1)
|
||||
return s
|
||||
}
|
||||
|
||||
// getOptions makes a string of options suitable for passing to FUSE
|
||||
// mount flag `-o`. Returns an empty string if no options were set.
|
||||
// Any platform specific adjustments should happen before the call.
|
||||
func (m *mountConfig) getOptions() string {
|
||||
var opts []string
|
||||
for k, v := range m.options {
|
||||
k = escapeComma(k)
|
||||
if v != "" {
|
||||
k += "=" + escapeComma(v)
|
||||
}
|
||||
opts = append(opts, k)
|
||||
}
|
||||
return strings.Join(opts, ",")
|
||||
}
|
||||
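The two helpers above are the whole contract between MountOption values and fusermount's `-o` flag: escape the separators, then join. A minimal standalone sketch of that logic, assuming a plain `opts` map in place of the unexported mountConfig:

```go
package main

import (
	"fmt"
	"strings"
)

// escape backslashes and commas so a value can live inside a
// comma-separated -o option string, mirroring escapeComma above.
func escape(s string) string {
	s = strings.Replace(s, `\`, `\\`, -1)
	s = strings.Replace(s, `,`, `\,`, -1)
	return s
}

func main() {
	opts := map[string]string{"fsname": "demo,fs", "ro": ""}
	var parts []string
	for k, v := range opts {
		k = escape(k)
		if v != "" {
			k += "=" + escape(v)
		}
		parts = append(parts, k)
	}
	// e.g. "fsname=demo\,fs,ro" (map iteration order is not deterministic)
	fmt.Println(strings.Join(parts, ","))
}
```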
|
||||
type mountOption func(*mountConfig) error
|
||||
|
||||
// MountOption is passed to Mount to change the behavior of the mount.
|
||||
type MountOption mountOption
|
||||
|
||||
// FSName sets the file system name (also called source) that is
|
||||
// visible in the list of mounted file systems.
|
||||
//
|
||||
// FreeBSD ignores this option.
|
||||
func FSName(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["fsname"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Subtype sets the subtype of the mount. The main type is always
|
||||
// `fuse`. The type in a list of mounted file systems will look like
|
||||
// `fuse.foo`.
|
||||
//
|
||||
// OS X ignores this option.
|
||||
// FreeBSD ignores this option.
|
||||
func Subtype(fstype string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["subtype"] = fstype
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// LocalVolume sets the volume to be local (instead of network),
|
||||
// changing the behavior of Finder, Spotlight, and such.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
func LocalVolume() MountOption {
|
||||
return localVolume
|
||||
}
|
||||
|
||||
// VolumeName sets the volume name shown in Finder.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
func VolumeName(name string) MountOption {
|
||||
return volumeName(name)
|
||||
}
|
||||
|
||||
// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
|
||||
// to store extended attributes on file systems that do not support
|
||||
// them natively.
|
||||
//
|
||||
// Such file names are:
|
||||
//
|
||||
// ._*
|
||||
// .DS_Store
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
func NoAppleDouble() MountOption {
|
||||
return noAppleDouble
|
||||
}
|
||||
|
||||
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
|
||||
// prefix "com.apple.". This disables persistent Finder state and
|
||||
// other such information.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
func NoAppleXattr() MountOption {
|
||||
return noAppleXattr
|
||||
}
|
||||
|
||||
// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
|
||||
// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
|
||||
//
|
||||
// OSXFUSE expects all create calls to return EEXIST in case the file
|
||||
// already exists, regardless of whether O_EXCL was specified or not.
|
||||
// To ensure this behavior, it normally sets OpenExclusive for all
|
||||
// Create calls, regardless of whether the original call had it set.
|
||||
// For distributed filesystems, that may force every file create to be
|
||||
// a distributed consensus action, causing undesirable delays.
|
||||
//
|
||||
// This option makes the FUSE filesystem see the original flag value,
|
||||
// and better decide when to ensure global consensus.
|
||||
//
|
||||
// Note that returning EEXIST on existing file create is still
|
||||
// expected with OSXFUSE, regardless of the presence of the
|
||||
// OpenExclusive flag.
|
||||
//
|
||||
// For more information, see
|
||||
// https://github.com/osxfuse/osxfuse/issues/209
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Requires OSXFUSE 3.4.1 or newer.
|
||||
func ExclCreate() MountOption {
|
||||
return exclCreate
|
||||
}
|
||||
|
||||
// DaemonTimeout sets the time in seconds between a request and a reply before
|
||||
// the FUSE mount is declared dead.
|
||||
//
|
||||
// OS X and FreeBSD only. Others ignore this option.
|
||||
func DaemonTimeout(name string) MountOption {
|
||||
return daemonTimeout(name)
|
||||
}
|
||||
|
||||
var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot")
|
||||
|
||||
// AllowOther allows other users to access the file system.
|
||||
//
|
||||
// Only one of AllowOther or AllowRoot can be used.
|
||||
func AllowOther() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
if _, ok := conf.options["allow_root"]; ok {
|
||||
return ErrCannotCombineAllowOtherAndAllowRoot
|
||||
}
|
||||
conf.options["allow_other"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AllowRoot allows the root user to access the file system.
|
||||
//
|
||||
// Only one of AllowOther or AllowRoot can be used.
|
||||
//
|
||||
// FreeBSD ignores this option.
|
||||
func AllowRoot() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
if _, ok := conf.options["allow_other"]; ok {
|
||||
return ErrCannotCombineAllowOtherAndAllowRoot
|
||||
}
|
||||
conf.options["allow_root"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
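Because both options check the shared options map, combining them surfaces ErrCannotCombineAllowOtherAndAllowRoot when the options are applied. A hedged usage sketch, assuming the package's exported Mount entry point and the bazil.org/fuse import path (neither appears in this hunk); the mountpoint is a placeholder:

```go
package main

import (
	"log"

	"bazil.org/fuse" // import path assumed from the vendored tree above
)

func main() {
	// Hypothetical mountpoint; both access options are requested on purpose.
	conn, err := fuse.Mount("/mnt/demo", fuse.AllowOther(), fuse.AllowRoot())
	if err != nil {
		// Expected: "cannot combine AllowOther and AllowRoot", returned by
		// whichever option closure runs second.
		log.Fatal(err)
	}
	defer conn.Close()
}
```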
|
||||
// AllowDev enables interpreting character or block special devices on the
|
||||
// filesystem.
|
||||
func AllowDev() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["dev"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AllowSUID allows set-user-identifier or set-group-identifier bits to take
|
||||
// effect.
|
||||
func AllowSUID() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["suid"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultPermissions makes the kernel enforce access control based on
|
||||
// the file mode (as in chmod).
|
||||
//
|
||||
// Without this option, the Node itself decides what is and is not
|
||||
// allowed. This is normally ok because FUSE file systems cannot be
|
||||
// accessed by other users without AllowOther/AllowRoot.
|
||||
//
|
||||
// FreeBSD ignores this option.
|
||||
func DefaultPermissions() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["default_permissions"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ReadOnly makes the mount read-only.
|
||||
func ReadOnly() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["ro"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MaxReadahead sets the number of bytes that can be prefetched for
|
||||
// sequential reads. The kernel can enforce a maximum value lower than
|
||||
// this.
|
||||
//
|
||||
// This setting makes the kernel perform speculative reads that do not
|
||||
// originate from any client process. This usually tremendously
|
||||
// improves read performance.
|
||||
func MaxReadahead(n uint32) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.maxReadahead = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AsyncRead enables multiple outstanding read requests for the same
|
||||
// handle. Without this, there is at most one request in flight at a
|
||||
// time.
|
||||
func AsyncRead() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.initFlags |= InitAsyncRead
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WritebackCache enables the kernel to buffer writes before sending
|
||||
// them to the FUSE server. Without this, writethrough caching is
|
||||
// used.
|
||||
func WritebackCache() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.initFlags |= InitWritebackCache
|
||||
return nil
|
||||
}
|
||||
}
|
||||
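The options above compose into a single Mount call. A hedged sketch of typical usage, again assuming the exported Mount/Unmount wrappers and a placeholder mountpoint; serving requests from the returned connection is omitted:

```go
package main

import (
	"log"

	"bazil.org/fuse" // import path assumed from the vendored tree above
)

func main() {
	conn, err := fuse.Mount(
		"/mnt/demo", // hypothetical mountpoint
		fuse.FSName("demofs"),
		fuse.Subtype("demofs"),
		fuse.ReadOnly(),
		fuse.MaxReadahead(128*1024),
		fuse.AsyncRead(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer fuse.Unmount("/mnt/demo")
	defer conn.Close()
	// ... serve FUSE requests from conn ...
}
```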
|
||||
// OSXFUSEPaths describes the paths used by an installed OSXFUSE
|
||||
// version. See OSXFUSELocationV3 for typical values.
|
||||
type OSXFUSEPaths struct {
|
||||
// Prefix for the device file. At mount time, an incrementing
|
||||
// number is suffixed until a free FUSE device is found.
|
||||
DevicePrefix string
|
||||
// Path of the load helper, used to load the kernel extension if
|
||||
// no device files are found.
|
||||
Load string
|
||||
// Path of the mount helper, used for the actual mount operation.
|
||||
Mount string
|
||||
// Environment variable used to pass the path to the executable
|
||||
// calling the mount helper.
|
||||
DaemonVar string
|
||||
}
|
||||
|
||||
// Default paths for OSXFUSE. See OSXFUSELocations.
|
||||
var (
|
||||
OSXFUSELocationV3 = OSXFUSEPaths{
|
||||
DevicePrefix: "/dev/osxfuse",
|
||||
Load: "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
|
||||
Mount: "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
|
||||
DaemonVar: "MOUNT_OSXFUSE_DAEMON_PATH",
|
||||
}
|
||||
OSXFUSELocationV2 = OSXFUSEPaths{
|
||||
DevicePrefix: "/dev/osxfuse",
|
||||
Load: "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
|
||||
Mount: "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
|
||||
DaemonVar: "MOUNT_FUSEFS_DAEMON_PATH",
|
||||
}
|
||||
)
|
||||
|
||||
// OSXFUSELocations sets where to look for OSXFUSE files. The
|
||||
// arguments are all the possible locations. The previous locations
|
||||
// are replaced.
|
||||
//
|
||||
// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
|
||||
// used.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
if len(paths) == 0 {
|
||||
return errors.New("must specify at least one location for OSXFUSELocations")
|
||||
}
|
||||
// replace previous values, but make a copy so there's no
|
||||
// worries about caller mutating their slice
|
||||
conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AllowNonEmptyMount allows mounting over a non-empty directory.
|
||||
//
|
||||
// The files in it will be shadowed by the freshly created mount. By
|
||||
// default these mounts are rejected to prevent accidental covering up
|
||||
// of data, which could for example prevent automatic backup.
|
||||
func AllowNonEmptyMount() MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["nonempty"] = ""
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
conf.options["local"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["volname"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["daemon_timeout"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
conf.options["noapplexattr"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
conf.options["noappledouble"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
conf.options["excl_create"] = ""
|
||||
return nil
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["timeout"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Protocol is a FUSE protocol version number.
|
||||
type Protocol struct {
|
||||
Major uint32
|
||||
Minor uint32
|
||||
}
|
||||
|
||||
func (p Protocol) String() string {
|
||||
return fmt.Sprintf("%d.%d", p.Major, p.Minor)
|
||||
}
|
||||
|
||||
// LT returns whether a is less than b.
|
||||
func (a Protocol) LT(b Protocol) bool {
|
||||
return a.Major < b.Major ||
|
||||
(a.Major == b.Major && a.Minor < b.Minor)
|
||||
}
|
||||
|
||||
// GE returns whether a is greater than or equal to b.
|
||||
func (a Protocol) GE(b Protocol) bool {
|
||||
return a.Major > b.Major ||
|
||||
(a.Major == b.Major && a.Minor >= b.Minor)
|
||||
}
|
||||
|
||||
func (a Protocol) is79() bool {
|
||||
return a.GE(Protocol{7, 9})
|
||||
}
|
||||
|
||||
// HasAttrBlockSize returns whether Attr.BlockSize is respected by the
|
||||
// kernel.
|
||||
func (a Protocol) HasAttrBlockSize() bool {
|
||||
return a.is79()
|
||||
}
|
||||
|
||||
// HasReadWriteFlags returns whether ReadRequest/WriteRequest
|
||||
// fields Flags and FileFlags are valid.
|
||||
func (a Protocol) HasReadWriteFlags() bool {
|
||||
return a.is79()
|
||||
}
|
||||
|
||||
// HasGetattrFlags returns whether GetattrRequest field Flags is
|
||||
// valid.
|
||||
func (a Protocol) HasGetattrFlags() bool {
|
||||
return a.is79()
|
||||
}
|
||||
|
||||
func (a Protocol) is710() bool {
|
||||
return a.GE(Protocol{7, 10})
|
||||
}
|
||||
|
||||
// HasOpenNonSeekable returns whether OpenResponse field Flags flag
|
||||
// OpenNonSeekable is supported.
|
||||
func (a Protocol) HasOpenNonSeekable() bool {
|
||||
return a.is710()
|
||||
}
|
||||
|
||||
func (a Protocol) is712() bool {
|
||||
return a.GE(Protocol{7, 12})
|
||||
}
|
||||
|
||||
// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest
|
||||
// field Umask is valid.
|
||||
func (a Protocol) HasUmask() bool {
|
||||
return a.is712()
|
||||
}
|
||||
|
||||
// HasInvalidate returns whether InvalidateNode/InvalidateEntry are
|
||||
// supported.
|
||||
func (a Protocol) HasInvalidate() bool {
|
||||
return a.is712()
|
||||
}
|
|
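All of the Has* feature probes above reduce to a GE comparison against a known protocol version. A small illustrative example, assuming Protocol values may be constructed directly (the struct fields are exported):

```go
package main

import (
	"fmt"

	"bazil.org/fuse" // import path assumed from the vendored tree above
)

func main() {
	p := fuse.Protocol{Major: 7, Minor: 10}
	fmt.Println(p)                                       // "7.10" via String()
	fmt.Println(p.GE(fuse.Protocol{Major: 7, Minor: 9})) // true
	fmt.Println(p.HasAttrBlockSize())                    // true: requires >= 7.9
	fmt.Println(p.HasOpenNonSeekable())                  // true: requires >= 7.10
	fmt.Println(p.HasUmask())                            // false: requires >= 7.12
}
```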
@ -1,6 +0,0 @@
|
|||
package fuse
|
||||
|
||||
// Unmount tries to unmount the filesystem mounted at dir.
|
||||
func Unmount(dir string) error {
|
||||
return unmount(dir)
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package fuse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func unmount(dir string) error {
|
||||
cmd := exec.Command("fusermount", "-u", dir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
if len(output) > 0 {
|
||||
output = bytes.TrimRight(output, "\n")
|
||||
msg := err.Error() + ": " + string(output)
|
||||
err = errors.New(msg)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
// +build !linux
|
||||
|
||||
package fuse
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func unmount(dir string) error {
|
||||
err := syscall.Unmount(dir, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "unmount", Path: dir, Err: err}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Coda Hale
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,15 +0,0 @@
|
|||
hdrhistogram
|
||||
============
|
||||
|
||||
[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram)
|
||||
|
||||
A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram).
|
||||
|
||||
> A Histogram that supports recording and analyzing sampled data value counts
|
||||
> across a configurable integer value range with configurable value precision
|
||||
> within the range. Value precision is expressed as the number of significant
|
||||
> digits in the value recording, and provides control over value quantization
|
||||
> behavior across the value range and the subsequent value resolution at any
|
||||
> given level.
|
||||
|
||||
For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram).
|
|
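The quoted description maps onto a small API surface: construct a histogram for a value range and precision, record values, then query quantiles. A hedged usage sketch against the package as vendored here; the "microseconds of latency" framing is only an example:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track values from 1 to 30,000,000 (e.g. microseconds of latency)
	// with 3 significant figures of precision.
	h := hdrhistogram.New(1, 30000000, 3)

	for v := int64(1); v <= 10000; v++ {
		if err := h.RecordValue(v); err != nil {
			fmt.Println("dropped:", err)
		}
	}

	fmt.Println("count:", h.TotalCount())        // 10000
	fmt.Println("p50:  ", h.ValueAtQuantile(50)) // ~5000, within precision
	fmt.Println("p99:  ", h.ValueAtQuantile(99)) // ~9900, within precision
	fmt.Println("max:  ", h.Max())               // ~10000, within precision
}
```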
@ -1,564 +0,0 @@
|
|||
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
|
||||
// data structure. The HDR Histogram allows for fast and accurate analysis of
|
||||
// the extreme ranges of data with non-normal distributions, like latency.
|
||||
package hdrhistogram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// A Bracket is a part of a cumulative distribution.
|
||||
type Bracket struct {
|
||||
Quantile float64
|
||||
Count, ValueAt int64
|
||||
}
|
||||
|
||||
// A Snapshot is an exported view of a Histogram, useful for serializing them.
|
||||
// A Histogram can be constructed from it by passing it to Import.
|
||||
type Snapshot struct {
|
||||
LowestTrackableValue int64
|
||||
HighestTrackableValue int64
|
||||
SignificantFigures int64
|
||||
Counts []int64
|
||||
}
|
||||
|
||||
// A Histogram is a lossy data structure used to record the distribution of
|
||||
// non-normally distributed data (like latency) with a high degree of accuracy
|
||||
// and a bounded degree of precision.
|
||||
type Histogram struct {
|
||||
lowestTrackableValue int64
|
||||
highestTrackableValue int64
|
||||
unitMagnitude int64
|
||||
significantFigures int64
|
||||
subBucketHalfCountMagnitude int32
|
||||
subBucketHalfCount int32
|
||||
subBucketMask int64
|
||||
subBucketCount int32
|
||||
bucketCount int32
|
||||
countsLen int32
|
||||
totalCount int64
|
||||
counts []int64
|
||||
}
|
||||
|
||||
// New returns a new Histogram instance capable of tracking values in the given
|
||||
// range and with the given amount of precision.
|
||||
func New(minValue, maxValue int64, sigfigs int) *Histogram {
|
||||
if sigfigs < 1 || 5 < sigfigs {
|
||||
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
|
||||
}
|
||||
|
||||
largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs)
|
||||
subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution))))
|
||||
|
||||
subBucketHalfCountMagnitude := subBucketCountMagnitude
|
||||
if subBucketHalfCountMagnitude < 1 {
|
||||
subBucketHalfCountMagnitude = 1
|
||||
}
|
||||
subBucketHalfCountMagnitude--
|
||||
|
||||
unitMagnitude := int32(math.Floor(math.Log2(float64(minValue))))
|
||||
if unitMagnitude < 0 {
|
||||
unitMagnitude = 0
|
||||
}
|
||||
|
||||
subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))
|
||||
|
||||
subBucketHalfCount := subBucketCount / 2
|
||||
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)
|
||||
|
||||
// determine exponent range needed to support the trackable value with no
|
||||
// overflow:
|
||||
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
|
||||
bucketsNeeded := int32(1)
|
||||
for smallestUntrackableValue < maxValue {
|
||||
smallestUntrackableValue <<= 1
|
||||
bucketsNeeded++
|
||||
}
|
||||
|
||||
bucketCount := bucketsNeeded
|
||||
countsLen := (bucketCount + 1) * (subBucketCount / 2)
|
||||
|
||||
return &Histogram{
|
||||
lowestTrackableValue: minValue,
|
||||
highestTrackableValue: maxValue,
|
||||
unitMagnitude: int64(unitMagnitude),
|
||||
significantFigures: int64(sigfigs),
|
||||
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
|
||||
subBucketHalfCount: subBucketHalfCount,
|
||||
subBucketMask: subBucketMask,
|
||||
subBucketCount: subBucketCount,
|
||||
bucketCount: bucketCount,
|
||||
countsLen: countsLen,
|
||||
totalCount: 0,
|
||||
counts: make([]int64, countsLen),
|
||||
}
|
||||
}
|
||||
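To make the sizing arithmetic in New concrete: taking the formulas above at face value, minValue=1, maxValue=1000000 and sigfigs=3 give 2048 sub-buckets, ten bucket doublings, and countsLen = (10+1)*1024 = 11264 counters. A hedged check via ByteSize (the exact numbers are an implementation detail, so treat them as illustrative):

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// sigfigs=3 => 2048 sub-buckets; ten doublings cover 1,000,000;
	// countsLen = (10+1)*1024 = 11264 int64 counters, if the math above holds.
	h := hdrhistogram.New(1, 1000000, 3)
	fmt.Println(h.ByteSize()) // 6*8 + 5*4 + 11264*8 = 90180
}
```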
|
||||
// ByteSize returns an estimate of the amount of memory allocated to the
|
||||
// histogram in bytes.
|
||||
//
|
||||
// N.B.: This does not take into account the overhead for slices, which are
|
||||
// small, constant, and specific to the compiler version.
|
||||
func (h *Histogram) ByteSize() int {
|
||||
return 6*8 + 5*4 + len(h.counts)*8
|
||||
}
|
||||
|
||||
// Merge merges the data stored in the given histogram with the receiver,
|
||||
// returning the number of recorded values which had to be dropped.
|
||||
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
|
||||
i := from.rIterator()
|
||||
for i.next() {
|
||||
v := i.valueFromIdx
|
||||
c := i.countAtIdx
|
||||
|
||||
if h.RecordValues(v, c) != nil {
|
||||
dropped += c
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// TotalCount returns the total number of values recorded.
|
||||
func (h *Histogram) TotalCount() int64 {
|
||||
return h.totalCount
|
||||
}
|
||||
|
||||
// Max returns the approximate maximum recorded value.
|
||||
func (h *Histogram) Max() int64 {
|
||||
var max int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
max = i.highestEquivalentValue
|
||||
}
|
||||
}
|
||||
return h.highestEquivalentValue(max)
|
||||
}
|
||||
|
||||
// Min returns the approximate minimum recorded value.
|
||||
func (h *Histogram) Min() int64 {
|
||||
var min int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 && min == 0 {
|
||||
min = i.highestEquivalentValue
|
||||
break
|
||||
}
|
||||
}
|
||||
return h.lowestEquivalentValue(min)
|
||||
}
|
||||
|
||||
// Mean returns the approximate arithmetic mean of the recorded values.
|
||||
func (h *Histogram) Mean() float64 {
|
||||
if h.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
var total int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
|
||||
}
|
||||
}
|
||||
return float64(total) / float64(h.totalCount)
|
||||
}
|
||||
|
||||
// StdDev returns the approximate standard deviation of the recorded values.
|
||||
func (h *Histogram) StdDev() float64 {
|
||||
if h.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
mean := h.Mean()
|
||||
geometricDevTotal := 0.0
|
||||
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
|
||||
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return math.Sqrt(geometricDevTotal / float64(h.totalCount))
|
||||
}
|
||||
|
||||
// Reset deletes all recorded values and restores the histogram to its original
|
||||
// state.
|
||||
func (h *Histogram) Reset() {
|
||||
h.totalCount = 0
|
||||
for i := range h.counts {
|
||||
h.counts[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// RecordValue records the given value, returning an error if the value is out
|
||||
// of range.
|
||||
func (h *Histogram) RecordValue(v int64) error {
|
||||
return h.RecordValues(v, 1)
|
||||
}
|
||||
|
||||
// RecordCorrectedValue records the given value, correcting for stalls in the
|
||||
// recording process. This only works for processes which are recording values
|
||||
// at an expected interval (e.g., doing jitter analysis). Processes which are
|
||||
// recording ad-hoc values (e.g., latency for incoming requests) can't take
|
||||
// advantage of this.
|
||||
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
|
||||
if err := h.RecordValue(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if expectedInterval <= 0 || v <= expectedInterval {
|
||||
return nil
|
||||
}
|
||||
|
||||
missingValue := v - expectedInterval
|
||||
for missingValue >= expectedInterval {
|
||||
if err := h.RecordValue(missingValue); err != nil {
|
||||
return err
|
||||
}
|
||||
missingValue -= expectedInterval
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
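In other words, one long stall is back-filled with the samples a non-stalled recorder would have produced (the usual coordinated-omission correction). A hedged illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/codahale/hdrhistogram"
)

func main() {
	h := hdrhistogram.New(1, 10000, 3)
	// One iteration of a 100-unit sampling loop stalled for 1000 units:
	// the call records 1000 and then back-fills 900, 800, ..., 100.
	if err := h.RecordCorrectedValue(1000, 100); err != nil {
		log.Fatal(err)
	}
	fmt.Println(h.TotalCount()) // 10
}
```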
|
||||
// RecordValues records n occurrences of the given value, returning an error if
|
||||
// the value is out of range.
|
||||
func (h *Histogram) RecordValues(v, n int64) error {
|
||||
idx := h.countsIndexFor(v)
|
||||
if idx < 0 || int(h.countsLen) <= idx {
|
||||
return fmt.Errorf("value %d is too large to be recorded", v)
|
||||
}
|
||||
h.counts[idx] += n
|
||||
h.totalCount += n
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueAtQuantile returns the recorded value at the given quantile (0..100).
|
||||
func (h *Histogram) ValueAtQuantile(q float64) int64 {
|
||||
if q > 100 {
|
||||
q = 100
|
||||
}
|
||||
|
||||
total := int64(0)
|
||||
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)
|
||||
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
total += i.countAtIdx
|
||||
if total >= countAtPercentile {
|
||||
return h.highestEquivalentValue(i.valueFromIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// CumulativeDistribution returns an ordered list of brackets of the
|
||||
// distribution of recorded values.
|
||||
func (h *Histogram) CumulativeDistribution() []Bracket {
|
||||
var result []Bracket
|
||||
|
||||
i := h.pIterator(1)
|
||||
for i.next() {
|
||||
result = append(result, Bracket{
|
||||
Quantile: i.percentile,
|
||||
Count: i.countToIdx,
|
||||
ValueAt: i.highestEquivalentValue,
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// SignificantFigures returns the significant figures used to create the
|
||||
// histogram
|
||||
func (h *Histogram) SignificantFigures() int64 {
|
||||
return h.significantFigures
|
||||
}
|
||||
|
||||
// LowestTrackableValue returns the lower bound on values that will be added
|
||||
// to the histogram
|
||||
func (h *Histogram) LowestTrackableValue() int64 {
|
||||
return h.lowestTrackableValue
|
||||
}
|
||||
|
||||
// HighestTrackableValue returns the upper bound on values that will be added
|
||||
// to the histogram
|
||||
func (h *Histogram) HighestTrackableValue() int64 {
|
||||
return h.highestTrackableValue
|
||||
}
|
||||
|
||||
// A Bar is a single histogram bar, used for plotting.
|
||||
type Bar struct {
|
||||
From, To, Count int64
|
||||
}
|
||||
|
||||
// String pretty-prints the bar as CSV for easy plotting.
|
||||
func (b Bar) String() string {
|
||||
return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count)
|
||||
}
|
||||
|
||||
// Distribution returns an ordered list of bars of the distribution of
// recorded values; counts can be normalized to a probability.
|
||||
func (h *Histogram) Distribution() (result []Bar) {
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
result = append(result, Bar{
|
||||
Count: i.countAtIdx,
|
||||
From: h.lowestEquivalentValue(i.valueFromIdx),
|
||||
To: i.highestEquivalentValue,
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
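Distribution and CumulativeDistribution walk the same iterator but present it differently: per-bar counts versus quantile brackets. A hedged sketch that prints both views:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	h := hdrhistogram.New(1, 1000, 2)
	for _, v := range []int64{1, 2, 2, 3, 100, 500} {
		_ = h.RecordValue(v)
	}

	for _, b := range h.Distribution() {
		if b.Count > 0 {
			fmt.Print(b) // Bar.String() already emits "From, To, Count\n"
		}
	}
	for _, br := range h.CumulativeDistribution() {
		fmt.Printf("q%.3f <= %d (count %d)\n", br.Quantile, br.ValueAt, br.Count)
	}
}
```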
|
||||
// Equals returns true if the two Histograms are equivalent, false if not.
|
||||
func (h *Histogram) Equals(other *Histogram) bool {
|
||||
switch {
|
||||
case
|
||||
h.lowestTrackableValue != other.lowestTrackableValue,
|
||||
h.highestTrackableValue != other.highestTrackableValue,
|
||||
h.unitMagnitude != other.unitMagnitude,
|
||||
h.significantFigures != other.significantFigures,
|
||||
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
|
||||
h.subBucketHalfCount != other.subBucketHalfCount,
|
||||
h.subBucketMask != other.subBucketMask,
|
||||
h.subBucketCount != other.subBucketCount,
|
||||
h.bucketCount != other.bucketCount,
|
||||
h.countsLen != other.countsLen,
|
||||
h.totalCount != other.totalCount:
|
||||
return false
|
||||
default:
|
||||
for i, c := range h.counts {
|
||||
if c != other.counts[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Export returns a snapshot view of the Histogram. This can be later passed to
|
||||
// Import to construct a new Histogram with the same state.
|
||||
func (h *Histogram) Export() *Snapshot {
|
||||
return &Snapshot{
|
||||
LowestTrackableValue: h.lowestTrackableValue,
|
||||
HighestTrackableValue: h.highestTrackableValue,
|
||||
SignificantFigures: h.significantFigures,
|
||||
Counts: append([]int64(nil), h.counts...), // copy
|
||||
}
|
||||
}
|
||||
|
||||
// Import returns a new Histogram populated from the Snapshot data (which the
|
||||
// caller must stop accessing).
|
||||
func Import(s *Snapshot) *Histogram {
|
||||
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
|
||||
h.counts = s.Counts
|
||||
totalCount := int64(0)
|
||||
for i := int32(0); i < h.countsLen; i++ {
|
||||
countAtIndex := h.counts[i]
|
||||
if countAtIndex > 0 {
|
||||
totalCount += countAtIndex
|
||||
}
|
||||
}
|
||||
h.totalCount = totalCount
|
||||
return h
|
||||
}
|
||||
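Export and Import are meant to round-trip: serialize the Snapshot in any format, rebuild the Histogram on the other side, and Equals should hold. A hedged sketch; JSON is chosen only for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/codahale/hdrhistogram"
)

func main() {
	h := hdrhistogram.New(1, 1000000, 3)
	for v := int64(1); v <= 1000; v++ {
		_ = h.RecordValue(v)
	}

	// Snapshot -> bytes -> Snapshot -> Histogram.
	raw, err := json.Marshal(h.Export())
	if err != nil {
		log.Fatal(err)
	}
	var snap hdrhistogram.Snapshot
	if err := json.Unmarshal(raw, &snap); err != nil {
		log.Fatal(err)
	}
	restored := hdrhistogram.Import(&snap)

	fmt.Println(restored.Equals(h))                 // true
	fmt.Println(restored.TotalCount(), h.TotalCount()) // 1000 1000
}
```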
|
||||
func (h *Histogram) iterator() *iterator {
|
||||
return &iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) rIterator() *rIterator {
|
||||
return &rIterator{
|
||||
iterator: iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
|
||||
return &pIterator{
|
||||
iterator: iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
},
|
||||
ticksPerHalfDistance: ticksPerHalfDistance,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
adjustedBucket := bucketIdx
|
||||
if subBucketIdx >= h.subBucketCount {
|
||||
adjustedBucket++
|
||||
}
|
||||
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
|
||||
}
|
||||
|
||||
func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
|
||||
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
|
||||
}
|
||||
|
||||
func (h *Histogram) lowestEquivalentValue(v int64) int64 {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
return h.valueFromIndex(bucketIdx, subBucketIdx)
|
||||
}
|
||||
|
||||
func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
|
||||
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
|
||||
}
|
||||
|
||||
func (h *Histogram) highestEquivalentValue(v int64) int64 {
|
||||
return h.nextNonEquivalentValue(v) - 1
|
||||
}
|
||||
|
||||
func (h *Histogram) medianEquivalentValue(v int64) int64 {
|
||||
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
|
||||
}
|
||||
|
||||
func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
|
||||
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
|
||||
}
|
||||
|
||||
func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
|
||||
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
|
||||
offsetInBucket := subBucketIdx - h.subBucketHalfCount
|
||||
return bucketBaseIdx + offsetInBucket
|
||||
}
|
||||
|
||||
func (h *Histogram) getBucketIndex(v int64) int32 {
|
||||
pow2Ceiling := bitLen(v | h.subBucketMask)
|
||||
return int32(pow2Ceiling - int64(h.unitMagnitude) -
|
||||
int64(h.subBucketHalfCountMagnitude+1))
|
||||
}
|
||||
|
||||
func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
|
||||
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
|
||||
}
|
||||
|
||||
func (h *Histogram) countsIndexFor(v int64) int {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
return int(h.countsIndex(bucketIdx, subBucketIdx))
|
||||
}
|
||||
|
||||
type iterator struct {
|
||||
h *Histogram
|
||||
bucketIdx, subBucketIdx int32
|
||||
countAtIdx, countToIdx, valueFromIdx int64
|
||||
highestEquivalentValue int64
|
||||
}
|
||||
|
||||
func (i *iterator) next() bool {
|
||||
if i.countToIdx >= i.h.totalCount {
|
||||
return false
|
||||
}
|
||||
|
||||
// increment bucket
|
||||
i.subBucketIdx++
|
||||
if i.subBucketIdx >= i.h.subBucketCount {
|
||||
i.subBucketIdx = i.h.subBucketHalfCount
|
||||
i.bucketIdx++
|
||||
}
|
||||
|
||||
if i.bucketIdx >= i.h.bucketCount {
|
||||
return false
|
||||
}
|
||||
|
||||
i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
|
||||
i.countToIdx += i.countAtIdx
|
||||
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
|
||||
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
type rIterator struct {
|
||||
iterator
|
||||
countAddedThisStep int64
|
||||
}
|
||||
|
||||
func (r *rIterator) next() bool {
|
||||
for r.iterator.next() {
|
||||
if r.countAtIdx != 0 {
|
||||
r.countAddedThisStep = r.countAtIdx
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type pIterator struct {
|
||||
iterator
|
||||
seenLastValue bool
|
||||
ticksPerHalfDistance int32
|
||||
percentileToIteratorTo float64
|
||||
percentile float64
|
||||
}
|
||||
|
||||
func (p *pIterator) next() bool {
|
||||
if !(p.countToIdx < p.h.totalCount) {
|
||||
if p.seenLastValue {
|
||||
return false
|
||||
}
|
||||
|
||||
p.seenLastValue = true
|
||||
p.percentile = 100
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
if p.subBucketIdx == -1 && !p.iterator.next() {
|
||||
return false
|
||||
}
|
||||
|
||||
var done = false
|
||||
for !done {
|
||||
currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
|
||||
if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
|
||||
p.percentile = p.percentileToIteratorTo
|
||||
halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1))
|
||||
percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
|
||||
p.percentileToIteratorTo += 100.0 / percentileReportingTicks
|
||||
return true
|
||||
}
|
||||
done = !p.iterator.next()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func bitLen(x int64) (n int64) {
|
||||
for ; x >= 0x8000; x >>= 16 {
|
||||
n += 16
|
||||
}
|
||||
if x >= 0x80 {
|
||||
x >>= 8
|
||||
n += 8
|
||||
}
|
||||
if x >= 0x8 {
|
||||
x >>= 4
|
||||
n += 4
|
||||
}
|
||||
if x >= 0x2 {
|
||||
x >>= 2
|
||||
n += 2
|
||||
}
|
||||
if x >= 0x1 {
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
package hdrhistogram
|
||||
|
||||
// A WindowedHistogram combines histograms to provide windowed statistics.
|
||||
type WindowedHistogram struct {
|
||||
idx int
|
||||
h []Histogram
|
||||
m *Histogram
|
||||
|
||||
Current *Histogram
|
||||
}
|
||||
|
||||
// NewWindowed creates a new WindowedHistogram with n underlying histograms,
// each created with the given parameters.
|
||||
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
|
||||
w := WindowedHistogram{
|
||||
idx: -1,
|
||||
h: make([]Histogram, n),
|
||||
m: New(minValue, maxValue, sigfigs),
|
||||
}
|
||||
|
||||
for i := range w.h {
|
||||
w.h[i] = *New(minValue, maxValue, sigfigs)
|
||||
}
|
||||
w.Rotate()
|
||||
|
||||
return &w
|
||||
}
|
||||
|
||||
// Merge returns a histogram which includes the recorded values from all the
|
||||
// sections of the window.
|
||||
func (w *WindowedHistogram) Merge() *Histogram {
|
||||
w.m.Reset()
|
||||
for _, h := range w.h {
|
||||
w.m.Merge(&h)
|
||||
}
|
||||
return w.m
|
||||
}
|
||||
|
||||
// Rotate resets the oldest histogram and rotates it to be used as the current
|
||||
// histogram.
|
||||
func (w *WindowedHistogram) Rotate() {
|
||||
w.idx++
|
||||
w.Current = &w.h[w.idx%len(w.h)]
|
||||
w.Current.Reset()
|
||||
}
|
|
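The windowed variant is intended for rolling views such as "the last N intervals" of latency: record into Current, Rotate on a timer, Merge when a reading is needed. A hedged sketch with the timer replaced by a plain loop:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Five rotating windows tracking values from 1 to 1,000,000
	// at 3 significant figures.
	w := hdrhistogram.NewWindowed(5, 1, 1000000, 3)

	for tick := 1; tick <= 7; tick++ {
		w.Rotate() // in real use this runs on a timer
		_ = w.Current.RecordValue(int64(100 * tick))
	}

	// Only the five most recent windows survive rotation (300..700 here).
	m := w.Merge()
	fmt.Println(m.TotalCount()) // 5
	fmt.Println(m.Max())        // 700
}
```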
@ -1,186 +0,0 @@
|
|||
Changes by Version
|
||||
==================
|
||||
|
||||
2.15.0 (unreleased)
|
||||
-------------------
|
||||
|
||||
- nothing yet
|
||||
|
||||
|
||||
2.14.0 (2018-04-30)
|
||||
-------------------
|
||||
|
||||
- Support throttling for debug traces (#274) <Isaac Hier>
|
||||
- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
|
||||
- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
|
||||
- Test with Go 1.9 (#298) <Yuri Shkuro>
|
||||
|
||||
|
||||
2.13.0 (2018-04-15)
|
||||
-------------------
|
||||
|
||||
- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
|
||||
- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
|
||||
- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
|
||||
- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
|
||||
- Allow overriding sampler in the Config (#270) <Mike Kabischev>
|
||||
|
||||
|
||||
2.12.0 (2018-03-14)
|
||||
-------------------
|
||||
|
||||
- Use lock when retrieving span.Context() (#268)
|
||||
- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
|
||||
|
||||
|
||||
2.11.2 (2018-01-12)
|
||||
-------------------
|
||||
|
||||
- Add Gopkg.toml to allow using the lib with `dep`
|
||||
|
||||
|
||||
2.11.1 (2018-01-03)
|
||||
-------------------
|
||||
|
||||
- Do not enqueue spans after Reporter is closed (#235, #245)
|
||||
- Change default flush interval to 1sec (#243)
|
||||
|
||||
|
||||
2.11.0 (2017-11-27)
|
||||
-------------------
|
||||
|
||||
- Normalize metric names and tags to be compatible with Prometheus (#222)
|
||||
|
||||
|
||||
2.10.0 (2017-11-14)
|
||||
-------------------
|
||||
|
||||
- Support custom tracing headers (#176)
|
||||
- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
|
||||
- Do not coerce baggage keys to lower case (#196)
|
||||
- Log span name when span cannot be reported (#198)
|
||||
- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
|
||||
|
||||
|
||||
2.9.0 (2017-07-29)
|
||||
------------------
|
||||
|
||||
- Pin thrift <= 0.10 (#179)
|
||||
- Introduce a parallel interface ContribObserver (#159)
|
||||
|
||||
|
||||
2.8.0 (2017-07-05)
|
||||
------------------
|
||||
|
||||
- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
|
||||
- Add options to set tracer tags
|
||||
|
||||
|
||||
2.7.0 (2017-06-21)
|
||||
------------------
|
||||
|
||||
- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
|
||||
- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
|
||||
- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
|
||||
- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
|
||||
- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
|
||||
|
||||
|
||||
2.6.0 (2017-03-28)
|
||||
------------------
|
||||
|
||||
- Add config option to initialize RPC Metrics feature
|
||||
|
||||
|
||||
2.5.0 (2017-03-23)
|
||||
------------------
|
||||
|
||||
- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
|
||||
- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
|
||||
- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
|
||||
|
||||
|
||||
2.4.0 (2017-03-21)
|
||||
------------------
|
||||
|
||||
- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
|
||||
- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
|
||||
|
||||
|
||||
2.3.0 (2017-03-20)
|
||||
------------------
|
||||
|
||||
- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
|
||||
- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
|
||||
|
||||
|
||||
2.2.1 (2017-03-14)
|
||||
------------------
|
||||
|
||||
- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
|
||||
|
||||
|
||||
2.2.0 (2017-03-10)
|
||||
------------------
|
||||
|
||||
- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
|
||||
- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
|
||||
|
||||
|
||||
2.1.2 (2017-02-27)
|
||||
-------------------
|
||||
|
||||
- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
|
||||
- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
|
||||
- Add tracer initialization godoc examples
|
||||
|
||||
|
||||
2.1.1 (2017-02-21)
|
||||
-------------------
|
||||
|
||||
- Fix inefficient usage of zap.Logger
|
||||
|
||||
|
||||
2.1.0 (2017-02-17)
|
||||
-------------------
|
||||
|
||||
- Add adapter for zap.Logger (https://github.com/uber-go/zap)
|
||||
- Move logging API to ./log/ package
|
||||
|
||||
|
||||
2.0.0 (2017-02-08)
|
||||
-------------------
|
||||
|
||||
- Support Adaptive Sampling
|
||||
- Support 128bit Trace IDs
|
||||
- Change trace/span IDs from uint64 to strong types TraceID and SpanID
|
||||
- Add Zipkin HTTP B3 Propagation format support #72
|
||||
- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
|
||||
- Change API for tracer, reporter, sampler initialization
|
||||
|
||||
|
||||
1.6.0 (2016-10-14)
|
||||
-------------------
|
||||
|
||||
- Add Zipkin HTTP transport
|
||||
- Support external baggage via jaeger-baggage header
|
||||
- Unpin Thrift version, keep to master
|
||||
|
||||
|
||||
1.5.1 (2016-09-27)
|
||||
-------------------
|
||||
|
||||
- Relax dependency on opentracing to ^1
|
||||
|
||||
|
||||
1.5.0 (2016-09-27)
|
||||
-------------------
|
||||
|
||||
- Upgrade to opentracing-go 1.0
|
||||
- Support KV logging for Spans
|
||||
|
||||
|
||||
1.4.0 (2016-09-14)
|
||||
-------------------
|
||||
|
||||
- Support debug traces via HTTP header "jaeger-debug-id"
|
|
@ -1,170 +0,0 @@
|
|||
# How to Contribute to Jaeger
|
||||
|
||||
We'd love your help!
|
||||
|
||||
Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
|
||||
pull requests. This document outlines some of the conventions on development
|
||||
workflow, commit message formatting, contact points and other resources to make
|
||||
it easier to get your contribution accepted.
|
||||
|
||||
We gratefully welcome improvements to documentation as well as to code.
|
||||
|
||||
# Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the [Developer Certificate of
|
||||
Origin](https://developercertificate.org/) (DCO). This document was created
|
||||
by the Linux Kernel community and is a simple statement that you, as a
|
||||
contributor, have the legal right to make the contribution. See the [DCO](DCO)
|
||||
file for details.
|
||||
|
||||
## Getting Started
|
||||
|
||||
This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies.
|
||||
|
||||
To get started, make sure you clone the Git repository into the correct location
|
||||
`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
|
||||
|
||||
```
|
||||
mkdir -p $GOPATH/src/github.com/uber
|
||||
cd $GOPATH/src/github.com/uber
|
||||
git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
|
||||
cd jaeger-client-go
|
||||
```
|
||||
|
||||
Then install dependencies and run the tests:
|
||||
|
||||
```
|
||||
git submodule update --init --recursive
|
||||
glide install
|
||||
make test
|
||||
```
|
||||
|
||||
## Imports grouping
|
||||
|
||||
This project uses the following pattern for grouping imports in Go files:
|
||||
* imports from standard library
|
||||
* imports from other projects
|
||||
* imports from `jaeger-client-go` project
|
||||
|
||||
For example:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/uber/jaeger-lib/metrics"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/uber/jaeger-client-go/config"
|
||||
)
|
||||
```
|
||||
|
||||
## Making A Change
|
||||
|
||||
*Before making any significant changes, please [open an
|
||||
issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
|
||||
changes ahead of time will make the contribution process smooth for everyone.
|
||||
|
||||
Once we've discussed your changes and you've got your code ready, make sure
|
||||
that tests are passing (`make test` or `make cover`) and open your PR. Your
|
||||
pull request is most likely to be accepted if it:
|
||||
|
||||
* Includes tests for new functionality.
|
||||
* Follows the guidelines in [Effective
|
||||
Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
|
||||
review comments](https://github.com/golang/go/wiki/CodeReviewComments).
|
||||
* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
|
||||
* Separate subject from body with a blank line
|
||||
* Limit the subject line to 50 characters
|
||||
* Capitalize the subject line
|
||||
* Do not end the subject line with a period
|
||||
* Use the imperative mood in the subject line
|
||||
* Wrap the body at 72 characters
|
||||
* Use the body to explain _what_ and _why_ instead of _how_
|
||||
* Each commit must be signed by the author ([see below](#sign-your-work)).
|
||||
|
||||
## License
|
||||
|
||||
By contributing your code, you agree to license your contribution under the terms
|
||||
of the [Apache License](LICENSE).
|
||||
|
||||
If you are adding a new file it should have a header like below. The easiest
|
||||
way to add such a header is to run `make fmt`.
|
||||
|
||||
```
|
||||
// Copyright (c) 2017 The Jaeger Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
```
|
||||
|
||||
## Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the
|
||||
patch, which certifies that you wrote it or otherwise have the right to
|
||||
pass it on as an open-source patch. The rules are pretty simple: if you
|
||||
can certify the below (from
|
||||
[developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe@gmail.com>
|
||||
|
||||
using your real name (sorry, no pseudonyms or anonymous contributions).
|
||||
|
||||
You can add the sign off when creating the git commit via `git commit -s`.
|
||||
|
||||
If you want this to be automatic, you can set up some aliases:
|
||||
|
||||
```
|
||||
git config --add alias.amend "commit -s --amend"
|
||||
git config --add alias.c "commit -s"
|
||||
```
|
|
@ -1,37 +0,0 @@
|
|||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
|
|
@ -1,164 +0,0 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/codahale/hdrhistogram"
|
||||
packages = ["."]
|
||||
revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/crossdock/crossdock-go"
|
||||
packages = [
|
||||
".",
|
||||
"assert",
|
||||
"require"
|
||||
]
|
||||
revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [
|
||||
".",
|
||||
"ext",
|
||||
"log"
|
||||
]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus"]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
]
|
||||
revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require",
|
||||
"suite"
|
||||
]
|
||||
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/uber-go/atomic"
|
||||
packages = ["."]
|
||||
revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/uber/jaeger-lib"
|
||||
packages = [
|
||||
"metrics",
|
||||
"metrics/prometheus",
|
||||
"metrics/testutils"
|
||||
]
|
||||
revision = "4267858c0679cd4e47cefed8d7f70fd386cfb567"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/atomic"
|
||||
packages = ["."]
|
||||
revision = "54f72d32435d760d5604f17a82e2435b28dc4ba5"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/multierr"
|
||||
packages = ["."]
|
||||
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/zap"
|
||||
packages = [
|
||||
".",
|
||||
"buffer",
|
||||
"internal/bufferpool",
|
||||
"internal/color",
|
||||
"internal/exit",
|
||||
"zapcore"
|
||||
]
|
||||
revision = "eeedf312bc6c57391d84767a4cd413f02a917974"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp"
|
||||
]
|
||||
revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "f9dcfaf37a785c5dac1e20c29605eda29a83ba9c6f8842e92960dc94c8c4ff80"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
|
@ -1,27 +0,0 @@
|
|||
[[constraint]]
|
||||
name = "github.com/crossdock/crossdock-go"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
version = "^1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "^1.1.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/uber-go/atomic"
|
||||
version = "^1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/uber/jaeger-lib"
|
||||
version = "^1.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "go.uber.org/zap"
|
||||
version = "^1"
|
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,117 +0,0 @@
|
|||
PROJECT_ROOT=github.com/uber/jaeger-client-go
|
||||
PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...)
|
||||
# all .go files that don't exist in hidden directories
|
||||
ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
|
||||
-e ".*/\..*" \
|
||||
-e ".*/_.*" \
|
||||
-e ".*/mocks.*")
|
||||
|
||||
-include crossdock/rules.mk
|
||||
|
||||
export GO15VENDOREXPERIMENT=1
|
||||
|
||||
RACE=-race
|
||||
GOTEST=go test -v $(RACE)
|
||||
GOLINT=golint
|
||||
GOVET=go vet
|
||||
GOFMT=gofmt
|
||||
FMT_LOG=fmt.log
|
||||
LINT_LOG=lint.log
|
||||
|
||||
THRIFT_VER=0.9.3
|
||||
THRIFT_IMG=thrift:$(THRIFT_VER)
|
||||
THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
|
||||
THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
|
||||
THRIFT_GEN_DIR=thrift-gen
|
||||
|
||||
PASS=$(shell printf "\033[32mPASS\033[0m")
|
||||
FAIL=$(shell printf "\033[31mFAIL\033[0m")
|
||||
COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
|
||||
|
||||
.DEFAULT_GOAL := test-and-lint
|
||||
|
||||
.PHONY: test-and-lint
|
||||
test-and-lint: test fmt lint
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
$(GOFMT) -e -s -l -w $(ALL_SRC)
|
||||
./scripts/updateLicenses.sh
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
$(GOVET) $(PACKAGES)
|
||||
@cat /dev/null > $(LINT_LOG)
|
||||
@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
|
||||
@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
|
||||
@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
|
||||
./scripts/updateLicenses.sh >> $(FMT_LOG)
|
||||
@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
|
||||
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
glide --version || go get github.com/Masterminds/glide
|
||||
ifeq ($(USE_DEP),true)
|
||||
dep ensure
|
||||
else
|
||||
glide install
|
||||
endif
|
||||
|
||||
|
||||
.PHONY: cover
|
||||
cover:
|
||||
./scripts/cover.sh $(shell go list $(PACKAGES))
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
|
||||
|
||||
# This is not part of the regular test target because we don't want to slow it
|
||||
# down.
|
||||
.PHONY: test-examples
|
||||
test-examples:
|
||||
make -C examples
|
||||
|
||||
# TODO at the moment we're not generating tchan_*.go files
|
||||
thrift: idl-submodule thrift-image
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
|
||||
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
|
||||
sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
|
||||
sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
|
||||
sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
|
||||
$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
|
||||
rm -rf thrift-gen/*/*-remote
|
||||
rm -rf crossdock/thrift/*/*-remote
|
||||
rm -rf thrift-gen/jaeger/collector.go
|
||||
|
||||
idl-submodule:
|
||||
git submodule init
|
||||
git submodule update
|
||||
|
||||
thrift-image:
|
||||
$(THRIFT) -version
|
||||
|
||||
.PHONY: install-dep-ci
|
||||
install-dep-ci:
|
||||
- curl -L -s https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 -o $$GOPATH/bin/dep
|
||||
- chmod +x $$GOPATH/bin/dep
|
||||
|
||||
.PHONY: install-ci
|
||||
install-ci: install-dep-ci install
|
||||
go get github.com/wadey/gocovmerge
|
||||
go get github.com/mattn/goveralls
|
||||
go get golang.org/x/tools/cmd/cover
|
||||
go get github.com/golang/lint/golint
|
||||
|
||||
.PHONY: test-ci
|
||||
test-ci:
|
||||
@./scripts/cover.sh $(shell go list $(PACKAGES))
|
||||
make lint
|
||||
|
|
@ -1,260 +0,0 @@
|
|||
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
|
||||
|
||||
# Jaeger Bindings for Go OpenTracing API
|
||||
|
||||
Instrumentation library that implements an
|
||||
[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io).
|
||||
|
||||
**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release.
|
||||
* :white_check_mark: `import "github.com/uber/jaeger-client-go"`
|
||||
* :x: `import "github.com/jaegertracing/jaeger-client-go"`
|
||||
|
||||
## How to Contribute
|
||||
|
||||
Please see [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
|
||||
## Installation
|
||||
|
||||
We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide)
|
||||
and [semantic versioning](http://semver.org/) when including this library into an application.
|
||||
For example, Jaeger backend imports this library like this:
|
||||
|
||||
```yaml
|
||||
- package: github.com/uber/jaeger-client-go
|
||||
version: ^2.7.0
|
||||
```
|
||||
|
||||
If you instead want to use the latest version in `master`, you can pull it via `go get`.
|
||||
Note that during `go get` you may see build errors due to incompatible dependencies, which is why
|
||||
we recommend using semantic versions for dependencies. The error may be fixed by running
|
||||
`make install` (it will install `glide` if you don't have it):
|
||||
|
||||
```shell
|
||||
go get -u github.com/uber/jaeger-client-go/
|
||||
cd $GOPATH/src/github.com/uber/jaeger-client-go/
|
||||
git submodule update --init --recursive
|
||||
make install
|
||||
```
|
||||
|
||||
## Initialization
|
||||
|
||||
See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
|
||||
and [config/example_test.go](./config/example_test.go).
|
||||
|
||||
### Environment variables
|
||||
|
||||
The tracer can be initialized with values coming from environment variables. None of the env vars are required
|
||||
and all of them can be overriden via direct setting of the property on the configuration object.
|
||||
|
||||
Property| Description
|
||||
--- | ---
|
||||
JAEGER_SERVICE_NAME | The service name
|
||||
JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
|
||||
JAEGER_AGENT_PORT | The port for communicating with agent via UDP
|
||||
JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
|
||||
JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
|
||||
JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval (ms)
|
||||
JAEGER_SAMPLER_TYPE | The sampler type
|
||||
JAEGER_SAMPLER_PARAM | The sampler parameter (number)
|
||||
JAEGER_SAMPLER_MANAGER_HOST_PORT | The host name and port when using the remote controlled sampler
|
||||
JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
|
||||
JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy
|
||||
JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found
|
||||
JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
|
||||
JAEGER_RPC_METRICS | Whether to store RPC metrics
|
||||
|
||||
### Closing the tracer via `io.Closer`
|
||||
|
||||
The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
|
||||
It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
|
||||
before exiting, e.g.
|
||||
|
||||
```go
|
||||
tracer, closer, err := cfg.NewTracer(...)
|
||||
defer closer.Close()
|
||||
```
|
||||
|
||||
This is especially useful for command-line tools that enable tracing, as well as
|
||||
for the long-running apps that support graceful shutdown. For example, if your deployment
|
||||
system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
|
||||
exit, then having `defer closer.Closer()` ensures that all buffered spans are flushed.
|
||||
|
||||
### Metrics & Monitoring
|
||||
|
||||
The tracer emits a number of different metrics, defined in
|
||||
[metrics.go](metrics.go). The monitoring backend is expected to support
|
||||
tag-based metric names, e.g. instead of `statsd`-style string names
|
||||
like `counters.my-service.jaeger.spans.started.sampled`, the metrics
|
||||
are defined by a short name and a collection of key/value tags, for
|
||||
example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go)
|
||||
file for the full list and descriptions of emitted metrics.
|
||||
|
||||
The monitoring backend is represented by the `metrics.Factory` interface from package
|
||||
[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
|
||||
of that interface can be passed as an option to either the Configuration object or the Tracer
|
||||
constructor, for example:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/uber/jaeger-client-go/config"
|
||||
"github.com/uber/jaeger-lib/metrics/prometheus"
|
||||
)
|
||||
|
||||
metricsFactory := prometheus.New()
|
||||
tracer, closer, err := config.Configuration{
|
||||
ServiceName: "your-service-name",
|
||||
}.NewTracer(
|
||||
config.Metrics(metricsFactory),
|
||||
)
|
||||
```
|
||||
|
||||
By default, a no-op `metrics.NullFactory` is used.
|
||||
|
||||
### Logging
|
||||
|
||||
The tracer can be configured with an optional logger, which will be
|
||||
used to log communication errors, or log spans if a logging reporter
|
||||
option is specified in the configuration. The logging API is abstracted
|
||||
by the [Logger](logger.go) interface. A logger instance implementing
|
||||
this interface can be set on the `Config` object before calling the
|
||||
`New` method.
|
||||
|
||||
Besides the [zap](https://github.com/uber-go/zap) implementation
|
||||
bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
|
||||
one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
|
||||
|
||||
## Instrumentation for Tracing
|
||||
|
||||
Since this tracer is fully compliant with OpenTracing API 1.0,
|
||||
all code instrumentation should only use the API itself, as described
|
||||
in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
|
||||
|
||||
## Features
|
||||
|
||||
### Reporters
|
||||
|
||||
A "reporter" is a component that receives the finished spans and reports
|
||||
them to somewhere. Under normal circumstances, the Tracer
|
||||
should use the default `RemoteReporter`, which sends the spans out of
|
||||
process via configurable "transport". For testing purposes, one can
|
||||
use an `InMemoryReporter` that accumulates spans in a buffer and
|
||||
allows to retrieve them for later verification. Also available are
|
||||
`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
|
||||
which logs all finished spans using their `String()` method, and a
|
||||
`CompositeReporter` that can be used to combine more than one reporter
|
||||
into one, e.g. to attach a logging reporter to the main remote reporter.
|
||||
|
||||
### Span Reporting Transports
|
||||
|
||||
The remote reporter uses "transports" to actually send the spans out
|
||||
of process. Currently the supported transports include:
|
||||
* [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
|
||||
* [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
|
||||
|
||||
### Sampling
|
||||
|
||||
The tracer does not record all spans, but only those that have the
|
||||
sampling bit set in the `flags`. When a new trace is started and a new
|
||||
unique ID is generated, a sampling decision is made whether this trace
|
||||
should be sampled. The sampling decision is propagated to all downstream
|
||||
calls via the `flags` field of the trace context. The following samplers
|
||||
are available:
|
||||
1. `RemotelyControlledSampler` uses one of the other simpler samplers
|
||||
and periodically updates it by polling an external server. This
|
||||
allows dynamic control of the sampling strategies.
|
||||
1. `ConstSampler` always makes the same sampling decision for all
|
||||
trace IDs. it can be configured to either sample all traces, or
|
||||
to sample none.
|
||||
1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
|
||||
for a given trace to be sampled. The actual decision is made by
|
||||
comparing the trace ID with a random number multiplied by the
|
||||
sampling rate.
|
||||
1. `RateLimitingSampler` can be used to allow only a certain fixed
|
||||
number of traces to be sampled per second.
|
||||
|
||||
### Baggage Injection
|
||||
|
||||
The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
|
||||
to the span context and propagated throughout the trace. An external process can inject baggage
|
||||
by setting the special HTTP Header `jaeger-baggage` on a request:
|
||||
|
||||
```sh
|
||||
curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
|
||||
```
|
||||
|
||||
Baggage can also be programatically set inside your service:
|
||||
|
||||
```go
|
||||
if span := opentracing.SpanFromContext(ctx); span != nil {
|
||||
span.SetBaggageItem("key", "value")
|
||||
}
|
||||
```
|
||||
|
||||
Another service downstream of that can retrieve the baggage in a similar way:
|
||||
|
||||
```go
|
||||
if span := opentracing.SpanFromContext(ctx); span != nil {
|
||||
val := span.BaggageItem("key")
|
||||
println(val)
|
||||
}
|
||||
```
|
||||
|
||||
### Debug Traces (Forced Sampling)
|
||||
|
||||
#### Programmatically
|
||||
|
||||
The OpenTracing API defines a `sampling.priority` standard tag that
|
||||
can be used to affect the sampling of a span and its children:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
)
|
||||
|
||||
span := opentracing.SpanFromContext(ctx)
|
||||
ext.SamplingPriority.Set(span, 1)
|
||||
```
|
||||
|
||||
#### Via HTTP Headers
|
||||
|
||||
Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
|
||||
which can be set in the incoming request, e.g.
|
||||
|
||||
```sh
|
||||
curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
|
||||
```
|
||||
|
||||
When Jaeger sees this header in the request that otherwise has no
|
||||
tracing context, it ensures that the new trace started for this
|
||||
request will be sampled in the "debug" mode (meaning it should survive
|
||||
all downsampling that might happen in the collection pipeline), and the
|
||||
root span will have a tag as if this statement was executed:
|
||||
|
||||
```go
|
||||
span.SetTag("jaeger-debug-id", "some-correlation-id")
|
||||
```
|
||||
|
||||
This allows using Jaeger UI to find the trace by this tag.
|
||||
|
||||
### Zipkin HTTP B3 compatible header propagation
|
||||
|
||||
Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
|
||||
by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
|
||||
|
||||
However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up.
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0 License](LICENSE).
|
||||
|
||||
|
||||
[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
|
||||
[doc]: https://godoc.org/github.com/uber/jaeger-client-go
|
||||
[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
|
||||
[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
|
||||
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
|
||||
[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
|
||||
[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
|
||||
[ot-url]: http://opentracing.io
|
||||
[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
|
|
@ -1,11 +0,0 @@
|
|||
# Release Process
|
||||
|
||||
1. Create a PR "Preparing for release X.Y.Z" against master branch
|
||||
* Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
|
||||
* Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
|
||||
2. Create a release "Release X.Y.Z" on Github
|
||||
* Create Tag `vX.Y.Z`
|
||||
* Copy CHANGELOG.md into the release notes
|
||||
3. Create a PR "Back to development" against master branch
|
||||
* Add `<next_version> (unreleased)` to CHANGELOG.md
|
||||
* Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
|
|
@ -1,77 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"github.com/opentracing/opentracing-go/log"
|
||||
|
||||
"github.com/uber/jaeger-client-go/internal/baggage"
|
||||
)
|
||||
|
||||
// baggageSetter is an actor that can set a baggage value on a Span given certain
|
||||
// restrictions (eg. maxValueLength).
|
||||
type baggageSetter struct {
|
||||
restrictionManager baggage.RestrictionManager
|
||||
metrics *Metrics
|
||||
}
|
||||
|
||||
func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
|
||||
return &baggageSetter{
|
||||
restrictionManager: restrictionManager,
|
||||
metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
// (NB) span should hold the lock before making this call
|
||||
func (s *baggageSetter) setBaggage(span *Span, key, value string) {
|
||||
var truncated bool
|
||||
var prevItem string
|
||||
restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
|
||||
if !restriction.KeyAllowed() {
|
||||
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
|
||||
s.metrics.BaggageUpdateFailure.Inc(1)
|
||||
return
|
||||
}
|
||||
if len(value) > restriction.MaxValueLength() {
|
||||
truncated = true
|
||||
value = value[:restriction.MaxValueLength()]
|
||||
s.metrics.BaggageTruncate.Inc(1)
|
||||
}
|
||||
prevItem = span.context.baggage[key]
|
||||
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
|
||||
span.context = span.context.WithBaggageItem(key, value)
|
||||
s.metrics.BaggageUpdateSuccess.Inc(1)
|
||||
}
|
||||
|
||||
func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
|
||||
if !span.context.IsSampled() {
|
||||
return
|
||||
}
|
||||
fields := []log.Field{
|
||||
log.String("event", "baggage"),
|
||||
log.String("key", key),
|
||||
log.String("value", value),
|
||||
}
|
||||
if prevItem != "" {
|
||||
fields = append(fields, log.String("override", "true"))
|
||||
}
|
||||
if truncated {
|
||||
fields = append(fields, log.String("truncated", "true"))
|
||||
}
|
||||
if !valid {
|
||||
fields = append(fields, log.String("invalid", "true"))
|
||||
}
|
||||
span.logFieldsNoLocking(fields...)
|
||||
}
|
|
@ -1,373 +0,0 @@
|
|||
// Copyright (c) 2017-2018 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
"github.com/uber/jaeger-client-go/internal/baggage/remote"
|
||||
throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
|
||||
"github.com/uber/jaeger-client-go/rpcmetrics"
|
||||
)
|
||||
|
||||
const defaultSamplingProbability = 0.001
|
||||
|
||||
// Configuration configures and creates Jaeger Tracer
|
||||
type Configuration struct {
|
||||
// ServiceName specifies the service name to use on the tracer.
|
||||
// Can be provided via environment variable named JAEGER_SERVICE_NAME
|
||||
ServiceName string `yaml:"serviceName"`
|
||||
|
||||
// Disabled can be provided via environment variable named JAEGER_DISABLED
|
||||
Disabled bool `yaml:"disabled"`
|
||||
|
||||
// RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
|
||||
RPCMetrics bool `yaml:"rpc_metrics"`
|
||||
|
||||
// Tags can be provided via environment variable named JAEGER_TAGS
|
||||
Tags []opentracing.Tag `yaml:"tags"`
|
||||
|
||||
Sampler *SamplerConfig `yaml:"sampler"`
|
||||
Reporter *ReporterConfig `yaml:"reporter"`
|
||||
Headers *jaeger.HeadersConfig `yaml:"headers"`
|
||||
BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
|
||||
Throttler *ThrottlerConfig `yaml:"throttler"`
|
||||
}
|
||||
|
||||
// SamplerConfig allows initializing a non-default sampler. All fields are optional.
|
||||
type SamplerConfig struct {
|
||||
// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
|
||||
Type string `yaml:"type"`
|
||||
|
||||
// Param is a value passed to the sampler.
|
||||
// Valid values for Param field are:
|
||||
// - for "const" sampler, 0 or 1 for always false/true respectively
|
||||
// - for "probabilistic" sampler, a probability between 0 and 1
|
||||
// - for "rateLimiting" sampler, the number of spans per second
|
||||
// - for "remote" sampler, param is the same as for "probabilistic"
|
||||
// and indicates the initial sampling rate before the actual one
|
||||
// is received from the mothership.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
|
||||
Param float64 `yaml:"param"`
|
||||
|
||||
// SamplingServerURL is the address of jaeger-agent's HTTP sampling server
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
|
||||
SamplingServerURL string `yaml:"samplingServerURL"`
|
||||
|
||||
// MaxOperations is the maximum number of operations that the sampler
|
||||
// will keep track of. If an operation is not tracked, a default probabilistic
|
||||
// sampler will be used rather than the per operation specific sampler.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS
|
||||
MaxOperations int `yaml:"maxOperations"`
|
||||
|
||||
// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
|
||||
// jaeger-agent for the appropriate sampling strategy.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
|
||||
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
|
||||
}
|
||||
|
||||
// ReporterConfig configures the reporter. All fields are optional.
|
||||
type ReporterConfig struct {
|
||||
// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
|
||||
// new spans. The queue is continuously drained by a background go-routine, as fast as spans
|
||||
// can be sent out of process.
|
||||
// Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
|
||||
QueueSize int `yaml:"queueSize"`
|
||||
|
||||
// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
|
||||
// It is generally not useful, as it only matters for very low traffic services.
|
||||
// Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
|
||||
BufferFlushInterval time.Duration
|
||||
|
||||
// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
|
||||
// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
|
||||
// for this option to have any effect.
|
||||
// Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
|
||||
LogSpans bool `yaml:"logSpans"`
|
||||
|
||||
// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
|
||||
// Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
|
||||
LocalAgentHostPort string `yaml:"localAgentHostPort"`
|
||||
}
|
||||
|
||||
// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
|
||||
// certain baggage keys. All fields are optional.
|
||||
type BaggageRestrictionsConfig struct {
|
||||
// DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
|
||||
// manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
|
||||
// been retrieved from jaeger-agent. If false, the manager wil allow any baggage to be written until baggage
|
||||
// restrictions have been retrieved from jaeger-agent.
|
||||
DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
|
||||
|
||||
// HostPort is the hostPort of jaeger-agent's baggage restrictions server
|
||||
HostPort string `yaml:"hostPort"`
|
||||
|
||||
// RefreshInterval controls how often the baggage restriction manager will poll
|
||||
// jaeger-agent for the most recent baggage restrictions.
|
||||
RefreshInterval time.Duration `yaml:"refreshInterval"`
|
||||
}
|
||||
|
||||
// ThrottlerConfig configures the throttler which can be used to throttle the
|
||||
// rate at which the client may send debug requests.
|
||||
type ThrottlerConfig struct {
|
||||
// HostPort of jaeger-agent's credit server.
|
||||
HostPort string `yaml:"hostPort"`
|
||||
|
||||
// RefreshInterval controls how often the throttler will poll jaeger-agent
|
||||
// for more throttling credits.
|
||||
RefreshInterval time.Duration `yaml:"refreshInterval"`
|
||||
|
||||
// SynchronousInitialization determines whether or not the throttler should
|
||||
// synchronously fetch credits from the agent when an operation is seen for
|
||||
// the first time. This should be set to true if the client will be used by
|
||||
// a short lived service that needs to ensure that credits are fetched
|
||||
// upfront such that sampling or throttling occurs.
|
||||
SynchronousInitialization bool `yaml:"synchronousInitialization"`
|
||||
}
|
||||
|
||||
type nullCloser struct{}
|
||||
|
||||
func (*nullCloser) Close() error { return nil }
|
||||
|
||||
// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
|
||||
// before shutdown.
|
||||
//
|
||||
// Deprecated: use NewTracer() function
|
||||
func (c Configuration) New(
|
||||
serviceName string,
|
||||
options ...Option,
|
||||
) (opentracing.Tracer, io.Closer, error) {
|
||||
if serviceName != "" {
|
||||
c.ServiceName = serviceName
|
||||
}
|
||||
|
||||
return c.NewTracer(options...)
|
||||
}
|
||||
|
||||
// NewTracer returns a new tracer based on the current configuration, using the given options,
|
||||
// and a closer func that can be used to flush buffers before shutdown.
|
||||
func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
|
||||
if c.ServiceName == "" {
|
||||
return nil, nil, errors.New("no service name provided")
|
||||
}
|
||||
|
||||
if c.Disabled {
|
||||
return &opentracing.NoopTracer{}, &nullCloser{}, nil
|
||||
}
|
||||
opts := applyOptions(options...)
|
||||
tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
|
||||
if c.RPCMetrics {
|
||||
Observer(
|
||||
rpcmetrics.NewObserver(
|
||||
opts.metrics.Namespace("jaeger-rpc", map[string]string{"component": "jaeger"}),
|
||||
rpcmetrics.DefaultNameNormalizer,
|
||||
),
|
||||
)(&opts) // adds to c.observers
|
||||
}
|
||||
if c.Sampler == nil {
|
||||
c.Sampler = &SamplerConfig{
|
||||
Type: jaeger.SamplerTypeRemote,
|
||||
Param: defaultSamplingProbability,
|
||||
}
|
||||
}
|
||||
if c.Reporter == nil {
|
||||
c.Reporter = &ReporterConfig{}
|
||||
}
|
||||
|
||||
sampler := opts.sampler
|
||||
if sampler == nil {
|
||||
s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
sampler = s
|
||||
}
|
||||
|
||||
reporter := opts.reporter
|
||||
if reporter == nil {
|
||||
r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
reporter = r
|
||||
}
|
||||
|
||||
tracerOptions := []jaeger.TracerOption{
|
||||
jaeger.TracerOptions.Metrics(tracerMetrics),
|
||||
jaeger.TracerOptions.Logger(opts.logger),
|
||||
jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
|
||||
jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
|
||||
jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
|
||||
jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
|
||||
}
|
||||
|
||||
for _, tag := range opts.tags {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
|
||||
}
|
||||
|
||||
for _, tag := range c.Tags {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
|
||||
}
|
||||
|
||||
for _, obs := range opts.observers {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
|
||||
}
|
||||
|
||||
for _, cobs := range opts.contribObservers {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
|
||||
}
|
||||
|
||||
for format, injector := range opts.injectors {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
|
||||
}
|
||||
|
||||
for format, extractor := range opts.extractors {
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
|
||||
}
|
||||
|
||||
if c.BaggageRestrictions != nil {
|
||||
mgr := remote.NewRestrictionManager(
|
||||
c.ServiceName,
|
||||
remote.Options.Metrics(tracerMetrics),
|
||||
remote.Options.Logger(opts.logger),
|
||||
remote.Options.HostPort(c.BaggageRestrictions.HostPort),
|
||||
remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
|
||||
remote.Options.DenyBaggageOnInitializationFailure(
|
||||
c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
|
||||
),
|
||||
)
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
|
||||
}
|
||||
|
||||
if c.Throttler != nil {
|
||||
debugThrottler := throttler.NewThrottler(
|
||||
c.ServiceName,
|
||||
throttler.Options.Metrics(tracerMetrics),
|
||||
throttler.Options.Logger(opts.logger),
|
||||
throttler.Options.HostPort(c.Throttler.HostPort),
|
||||
throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
|
||||
throttler.Options.SynchronousInitialization(
|
||||
c.Throttler.SynchronousInitialization,
|
||||
),
|
||||
)
|
||||
|
||||
tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
|
||||
}
|
||||
|
||||
tracer, closer := jaeger.NewTracer(
|
||||
c.ServiceName,
|
||||
sampler,
|
||||
reporter,
|
||||
tracerOptions...,
|
||||
)
|
||||
|
||||
return tracer, closer, nil
|
||||
}
|
||||
|
||||
// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
|
||||
// It returns a closer func that can be used to flush buffers before shutdown.
|
||||
func (c Configuration) InitGlobalTracer(
|
||||
serviceName string,
|
||||
options ...Option,
|
||||
) (io.Closer, error) {
|
||||
if c.Disabled {
|
||||
return &nullCloser{}, nil
|
||||
}
|
||||
tracer, closer, err := c.New(serviceName, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opentracing.SetGlobalTracer(tracer)
|
||||
return closer, nil
|
||||
}
|
||||
|
||||
// NewSampler creates a new sampler based on the configuration
|
||||
func (sc *SamplerConfig) NewSampler(
|
||||
serviceName string,
|
||||
metrics *jaeger.Metrics,
|
||||
) (jaeger.Sampler, error) {
|
||||
samplerType := strings.ToLower(sc.Type)
|
||||
if samplerType == jaeger.SamplerTypeConst {
|
||||
return jaeger.NewConstSampler(sc.Param != 0), nil
|
||||
}
|
||||
if samplerType == jaeger.SamplerTypeProbabilistic {
|
||||
if sc.Param >= 0 && sc.Param <= 1.0 {
|
||||
return jaeger.NewProbabilisticSampler(sc.Param)
|
||||
}
|
||||
return nil, fmt.Errorf(
|
||||
"Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1",
|
||||
sc.Param,
|
||||
)
|
||||
}
|
||||
if samplerType == jaeger.SamplerTypeRateLimiting {
|
||||
return jaeger.NewRateLimitingSampler(sc.Param), nil
|
||||
}
|
||||
if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
|
||||
sc2 := *sc
|
||||
sc2.Type = jaeger.SamplerTypeProbabilistic
|
||||
initSampler, err := sc2.NewSampler(serviceName, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options := []jaeger.SamplerOption{
|
||||
jaeger.SamplerOptions.Metrics(metrics),
|
||||
jaeger.SamplerOptions.InitialSampler(initSampler),
|
||||
jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
|
||||
}
|
||||
if sc.MaxOperations != 0 {
|
||||
options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations))
|
||||
}
|
||||
if sc.SamplingRefreshInterval != 0 {
|
||||
options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
|
||||
}
|
||||
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
|
||||
}
|
||||
return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
|
||||
}
|
||||
|
||||
// NewReporter instantiates a new reporter that submits spans to tcollector
|
||||
func (rc *ReporterConfig) NewReporter(
|
||||
serviceName string,
|
||||
metrics *jaeger.Metrics,
|
||||
logger jaeger.Logger,
|
||||
) (jaeger.Reporter, error) {
|
||||
sender, err := rc.newTransport()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reporter := jaeger.NewRemoteReporter(
|
||||
sender,
|
||||
jaeger.ReporterOptions.QueueSize(rc.QueueSize),
|
||||
jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
|
||||
jaeger.ReporterOptions.Logger(logger),
|
||||
jaeger.ReporterOptions.Metrics(metrics))
|
||||
if rc.LogSpans && logger != nil {
|
||||
logger.Infof("Initializing logging reporter\n")
|
||||
reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
|
||||
}
|
||||
return reporter, err
|
||||
}
|
||||
|
||||
func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
|
||||
return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
|
||||
}
|
|
@ -1,205 +0,0 @@
|
|||
// Copyright (c) 2018 The Jaeger Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// environment variable names
|
||||
envServiceName = "JAEGER_SERVICE_NAME"
|
||||
envDisabled = "JAEGER_DISABLED"
|
||||
envRPCMetrics = "JAEGER_RPC_METRICS"
|
||||
envTags = "JAEGER_TAGS"
|
||||
envSamplerType = "JAEGER_SAMPLER_TYPE"
|
||||
envSamplerParam = "JAEGER_SAMPLER_PARAM"
|
||||
envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
|
||||
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
|
||||
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
|
||||
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
|
||||
envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
|
||||
envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
|
||||
envAgentHost = "JAEGER_AGENT_HOST"
|
||||
envAgentPort = "JAEGER_AGENT_PORT"
|
||||
)
|
||||
|
||||
// FromEnv uses environment variables to set the tracer's Configuration
|
||||
func FromEnv() (*Configuration, error) {
|
||||
c := &Configuration{}
|
||||
|
||||
if e := os.Getenv(envServiceName); e != "" {
|
||||
c.ServiceName = e
|
||||
}
|
||||
|
||||
if e := os.Getenv(envRPCMetrics); e != "" {
|
||||
if value, err := strconv.ParseBool(e); err == nil {
|
||||
c.RPCMetrics = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envDisabled); e != "" {
|
||||
if value, err := strconv.ParseBool(e); err == nil {
|
||||
c.Disabled = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envTags); e != "" {
|
||||
c.Tags = parseTags(e)
|
||||
}
|
||||
|
||||
if s, err := samplerConfigFromEnv(); err == nil {
|
||||
c.Sampler = s
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
|
||||
}
|
||||
|
||||
if r, err := reporterConfigFromEnv(); err == nil {
|
||||
c.Reporter = r
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
|
||||
func samplerConfigFromEnv() (*SamplerConfig, error) {
|
||||
sc := &SamplerConfig{}
|
||||
|
||||
if e := os.Getenv(envSamplerType); e != "" {
|
||||
sc.Type = e
|
||||
}
|
||||
|
||||
if e := os.Getenv(envSamplerParam); e != "" {
|
||||
if value, err := strconv.ParseFloat(e, 64); err == nil {
|
||||
sc.Param = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envSamplerManagerHostPort); e != "" {
|
||||
sc.SamplingServerURL = e
|
||||
}
|
||||
|
||||
if e := os.Getenv(envSamplerMaxOperations); e != "" {
|
||||
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
|
||||
sc.MaxOperations = int(value)
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envSamplerRefreshInterval); e != "" {
|
||||
if value, err := time.ParseDuration(e); err == nil {
|
||||
sc.SamplingRefreshInterval = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
|
||||
}
|
||||
}
|
||||
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
|
||||
func reporterConfigFromEnv() (*ReporterConfig, error) {
|
||||
rc := &ReporterConfig{}
|
||||
|
||||
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
|
||||
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
|
||||
rc.QueueSize = int(value)
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envReporterFlushInterval); e != "" {
|
||||
if value, err := time.ParseDuration(e); err == nil {
|
||||
rc.BufferFlushInterval = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
|
||||
}
|
||||
}
|
||||
|
||||
if e := os.Getenv(envReporterLogSpans); e != "" {
|
||||
if value, err := strconv.ParseBool(e); err == nil {
|
||||
rc.LogSpans = value
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
|
||||
}
|
||||
}
|
||||
|
||||
host := jaeger.DefaultUDPSpanServerHost
|
||||
if e := os.Getenv(envAgentHost); e != "" {
|
||||
host = e
|
||||
}
|
||||
|
||||
port := jaeger.DefaultUDPSpanServerPort
|
||||
if e := os.Getenv(envAgentPort); e != "" {
|
||||
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
|
||||
port = int(value)
|
||||
} else {
|
||||
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
|
||||
}
|
||||
}
|
||||
|
||||
// the side effect of this is that we are building the default value, even if none of the env vars
|
||||
// were not explicitly passed
|
||||
rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
|
||||
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// parseTags parses the given string into a collection of Tags.
|
||||
// Spec for this value:
|
||||
// - comma separated list of key=value
|
||||
// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
|
||||
// is an environment variable and `defaultValue` is the value to use in case the env var is not set
|
||||
func parseTags(sTags string) []opentracing.Tag {
|
||||
pairs := strings.Split(sTags, ",")
|
||||
tags := make([]opentracing.Tag, 0)
|
||||
for _, p := range pairs {
|
||||
kv := strings.SplitN(p, "=", 2)
|
||||
k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
|
||||
|
||||
if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
|
||||
ed := strings.SplitN(v[2:len(v)-1], ":", 2)
|
||||
e, d := ed[0], ed[1]
|
||||
v = os.Getenv(e)
|
||||
if v == "" && d != "" {
|
||||
v = d
|
||||
}
|
||||
}
|
||||
|
||||
tag := opentracing.Tag{Key: k, Value: v}
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
|
||||
return tags
|
||||
}
|
|
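The env-var parsing above feeds the config package's environment-based constructor. Below is a minimal usage sketch; it assumes the enclosing constructor is config.FromEnv and that the env* constants map to the usual JAEGER_* variable names, neither of which is spelled out in this hunk.

package main

import (
	"fmt"
	"log"
	"os"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Assumed variable names; the env* constants are defined outside this hunk.
	os.Setenv("JAEGER_SAMPLER_TYPE", "const")
	os.Setenv("JAEGER_SAMPLER_PARAM", "1")
	os.Setenv("JAEGER_AGENT_HOST", "127.0.0.1")
	os.Setenv("JAEGER_AGENT_PORT", "6831")

	cfg, err := jaegercfg.FromEnv() // builds SamplerConfig and ReporterConfig as shown above
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Sampler.Type, cfg.Sampler.Param, cfg.Reporter.LocalAgentHostPort)
}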
@@ -1,148 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/uber/jaeger-lib/metrics"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
)
|
||||
|
||||
// Option is a function that sets some option on the client.
|
||||
type Option func(c *Options)
|
||||
|
||||
// Options control behavior of the client.
|
||||
type Options struct {
|
||||
metrics metrics.Factory
|
||||
logger jaeger.Logger
|
||||
reporter jaeger.Reporter
|
||||
sampler jaeger.Sampler
|
||||
contribObservers []jaeger.ContribObserver
|
||||
observers []jaeger.Observer
|
||||
gen128Bit bool
|
||||
zipkinSharedRPCSpan bool
|
||||
maxTagValueLength int
|
||||
tags []opentracing.Tag
|
||||
injectors map[interface{}]jaeger.Injector
|
||||
extractors map[interface{}]jaeger.Extractor
|
||||
}
|
||||
|
||||
// Metrics creates an Option that initializes Metrics in the tracer,
|
||||
// which is used to emit statistics about spans.
|
||||
func Metrics(factory metrics.Factory) Option {
|
||||
return func(c *Options) {
|
||||
c.metrics = factory
|
||||
}
|
||||
}
|
||||
|
||||
// Logger can be provided to log Reporter errors, as well as to log spans
|
||||
// if Reporter.LogSpans is set to true.
|
||||
func Logger(logger jaeger.Logger) Option {
|
||||
return func(c *Options) {
|
||||
c.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// Reporter can be provided explicitly to override the configuration.
|
||||
// Useful for testing, e.g. by passing InMemoryReporter.
|
||||
func Reporter(reporter jaeger.Reporter) Option {
|
||||
return func(c *Options) {
|
||||
c.reporter = reporter
|
||||
}
|
||||
}
|
||||
|
||||
// Sampler can be provided explicitly to override the configuration.
|
||||
func Sampler(sampler jaeger.Sampler) Option {
|
||||
return func(c *Options) {
|
||||
c.sampler = sampler
|
||||
}
|
||||
}
|
||||
|
||||
// Observer can be registered with the Tracer to receive notifications about new Spans.
|
||||
func Observer(observer jaeger.Observer) Option {
|
||||
return func(c *Options) {
|
||||
c.observers = append(c.observers, observer)
|
||||
}
|
||||
}
|
||||
|
||||
// ContribObserver can be registered with the Tracer to receive notifications
|
||||
// about new spans.
|
||||
func ContribObserver(observer jaeger.ContribObserver) Option {
|
||||
return func(c *Options) {
|
||||
c.contribObservers = append(c.contribObservers, observer)
|
||||
}
|
||||
}
|
||||
|
||||
// Gen128Bit specifies whether to generate 128bit trace IDs.
|
||||
func Gen128Bit(gen128Bit bool) Option {
|
||||
return func(c *Options) {
|
||||
c.gen128Bit = gen128Bit
|
||||
}
|
||||
}
|
||||
|
||||
// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
|
||||
// and server spans a la zipkin. If false, client and server spans will be assigned
|
||||
// different IDs.
|
||||
func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
|
||||
return func(c *Options) {
|
||||
c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
|
||||
}
|
||||
}
|
||||
|
||||
// MaxTagValueLength can be provided to override the default max tag value length.
|
||||
func MaxTagValueLength(maxTagValueLength int) Option {
|
||||
return func(c *Options) {
|
||||
c.maxTagValueLength = maxTagValueLength
|
||||
}
|
||||
}
|
||||
|
||||
// Tag creates an option that adds a tracer-level tag.
|
||||
func Tag(key string, value interface{}) Option {
|
||||
return func(c *Options) {
|
||||
c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
|
||||
}
|
||||
}
|
||||
|
||||
// Injector registers an Injector with the given format.
|
||||
func Injector(format interface{}, injector jaeger.Injector) Option {
|
||||
return func(c *Options) {
|
||||
c.injectors[format] = injector
|
||||
}
|
||||
}
|
||||
|
||||
// Extractor registers an Extractor with the given format.
|
||||
func Extractor(format interface{}, extractor jaeger.Extractor) Option {
|
||||
return func(c *Options) {
|
||||
c.extractors[format] = extractor
|
||||
}
|
||||
}
|
||||
|
||||
func applyOptions(options ...Option) Options {
|
||||
opts := Options{
|
||||
injectors: make(map[interface{}]jaeger.Injector),
|
||||
extractors: make(map[interface{}]jaeger.Extractor),
|
||||
}
|
||||
for _, option := range options {
|
||||
option(&opts)
|
||||
}
|
||||
if opts.metrics == nil {
|
||||
opts.metrics = metrics.NullFactory
|
||||
}
|
||||
if opts.logger == nil {
|
||||
opts.logger = jaeger.NullLogger
|
||||
}
|
||||
return opts
|
||||
}
|
|
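The functional options above are consumed by applyOptions when the tracer is assembled. A hedged usage sketch follows; it assumes the config package's Configuration struct with a ServiceName field and a NewTracer(options ...Option) method (both defined outside this hunk), and jaeger.StdLogger as the logger.

package main

import (
	"io"
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func newTracer() (opentracing.Tracer, io.Closer, error) {
	cfg := jaegercfg.Configuration{
		ServiceName: "demo-service", // hypothetical service name
		Sampler:     &jaegercfg.SamplerConfig{Type: "const", Param: 1},
	}
	// Each Option mutates the private Options struct shown above before the tracer is built.
	return cfg.NewTracer(
		jaegercfg.Logger(jaeger.StdLogger),
		jaegercfg.Gen128Bit(true),
		jaegercfg.Tag("deployment", "staging"),
	)
}

func main() {
	tracer, closer, err := newTracer()
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)
}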
@@ -1,88 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
const (
|
||||
// JaegerClientVersion is the version of the client library reported as Span tag.
|
||||
JaegerClientVersion = "Go-2.15.0-dev"
|
||||
|
||||
// JaegerClientVersionTagKey is the name of the tag used to report client version.
|
||||
JaegerClientVersionTagKey = "jaeger.version"
|
||||
|
||||
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
|
||||
// if found in the carrier, forces the trace to be sampled as "debug" trace.
|
||||
// The value of the header is recorded as the tag on the root span, so that the
|
||||
// trace can be found in the UI using this value as a correlation ID.
|
||||
JaegerDebugHeader = "jaeger-debug-id"
|
||||
|
||||
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
|
||||
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
|
||||
// a root span does not exist.
|
||||
JaegerBaggageHeader = "jaeger-baggage"
|
||||
|
||||
// TracerHostnameTagKey used to report host name of the process.
|
||||
TracerHostnameTagKey = "hostname"
|
||||
|
||||
// TracerIPTagKey used to report ip of the process.
|
||||
TracerIPTagKey = "ip"
|
||||
|
||||
// TracerUUIDTagKey used to report UUID of the client process.
|
||||
TracerUUIDTagKey = "client-uuid"
|
||||
|
||||
// SamplerTypeTagKey reports which sampler was used on the root span.
|
||||
SamplerTypeTagKey = "sampler.type"
|
||||
|
||||
// SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
|
||||
SamplerParamTagKey = "sampler.param"
|
||||
|
||||
// TraceContextHeaderName is the http header name used to propagate tracing context.
|
||||
// This must be in lower-case to avoid mismatches when decoding incoming headers.
|
||||
TraceContextHeaderName = "uber-trace-id"
|
||||
|
||||
// TracerStateHeaderName is deprecated.
|
||||
// Deprecated: use TraceContextHeaderName
|
||||
TracerStateHeaderName = TraceContextHeaderName
|
||||
|
||||
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
|
||||
// This must be in lower-case to avoid mismatches when decoding incoming headers.
|
||||
TraceBaggageHeaderPrefix = "uberctx-"
|
||||
|
||||
// SamplerTypeConst is the type of sampler that always makes the same decision.
|
||||
SamplerTypeConst = "const"
|
||||
|
||||
// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
|
||||
SamplerTypeRemote = "remote"
|
||||
|
||||
// SamplerTypeProbabilistic is the type of sampler that samples traces
|
||||
// with a certain fixed probability.
|
||||
SamplerTypeProbabilistic = "probabilistic"
|
||||
|
||||
// SamplerTypeRateLimiting is the type of sampler that samples
|
||||
// only up to a fixed number of traces per second.
|
||||
SamplerTypeRateLimiting = "ratelimiting"
|
||||
|
||||
// SamplerTypeLowerBound is the type of sampler that samples
|
||||
// at least a fixed number of traces per second.
|
||||
SamplerTypeLowerBound = "lowerbound"
|
||||
|
||||
// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
|
||||
DefaultUDPSpanServerHost = "localhost"
|
||||
|
||||
// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
|
||||
DefaultUDPSpanServerPort = 6831
|
||||
|
||||
// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
|
||||
DefaultMaxTagValueLength = 256
|
||||
)
|
|
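These constants define the wire names used for propagation. A small illustration of reading and writing the headers directly, using hypothetical header values:

package main

import (
	"fmt"
	"net/http"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	// Hypothetical propagated context: traceID:spanID:parentID:flags (flags=1 means sampled).
	req.Header.Set(jaeger.TraceContextHeaderName, "7f3f2a1b9c8d4e5f:1a2b3c4d5e6f7081:0:1")
	req.Header.Set(jaeger.JaegerBaggageHeader, "k1=v1, k2=v2")

	fmt.Println("trace header:", req.Header.Get(jaeger.TraceContextHeaderName))
	fmt.Printf("default agent: %s:%d\n", jaeger.DefaultUDPSpanServerHost, jaeger.DefaultUDPSpanServerPort)
}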
@@ -1,258 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
flagSampled = byte(1)
|
||||
flagDebug = byte(2)
|
||||
)
|
||||
|
||||
var (
|
||||
errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
|
||||
errMalformedTracerStateString = errors.New("String does not match tracer state format")
|
||||
|
||||
emptyContext = SpanContext{}
|
||||
)
|
||||
|
||||
// TraceID represents unique 128bit identifier of a trace
|
||||
type TraceID struct {
|
||||
High, Low uint64
|
||||
}
|
||||
|
||||
// SpanID represents unique 64bit identifier of a span
|
||||
type SpanID uint64
|
||||
|
||||
// SpanContext represents propagated span identity and state
|
||||
type SpanContext struct {
|
||||
// traceID represents globally unique ID of the trace.
|
||||
// Usually generated as a random number.
|
||||
traceID TraceID
|
||||
|
||||
// spanID represents span ID that must be unique within its trace,
|
||||
// but does not have to be globally unique.
|
||||
spanID SpanID
|
||||
|
||||
// parentID refers to the ID of the parent span.
|
||||
// Should be 0 if the current span is a root span.
|
||||
parentID SpanID
|
||||
|
||||
// flags is a bitmap containing such bits as 'sampled' and 'debug'.
|
||||
flags byte
|
||||
|
||||
// Distributed Context baggage. This is a snapshot in time.
|
||||
baggage map[string]string
|
||||
|
||||
// debugID can be set to some correlation ID when the context is being
|
||||
// extracted from a TextMap carrier.
|
||||
//
|
||||
// See JaegerDebugHeader in constants.go
|
||||
debugID string
|
||||
}
|
||||
|
||||
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
|
||||
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
|
||||
for k, v := range c.baggage {
|
||||
if !handler(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsSampled returns whether this trace was chosen for permanent storage
|
||||
// by the sampling mechanism of the tracer.
|
||||
func (c SpanContext) IsSampled() bool {
|
||||
return (c.flags & flagSampled) == flagSampled
|
||||
}
|
||||
|
||||
// IsDebug indicates whether sampling was explicitly requested by the service.
|
||||
func (c SpanContext) IsDebug() bool {
|
||||
return (c.flags & flagDebug) == flagDebug
|
||||
}
|
||||
|
||||
// IsValid indicates whether this context actually represents a valid trace.
|
||||
func (c SpanContext) IsValid() bool {
|
||||
return c.traceID.IsValid() && c.spanID != 0
|
||||
}
|
||||
|
||||
func (c SpanContext) String() string {
|
||||
if c.traceID.High == 0 {
|
||||
return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
|
||||
}
|
||||
return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
|
||||
}
|
||||
|
||||
// ContextFromString reconstructs the Context encoded in a string
|
||||
func ContextFromString(value string) (SpanContext, error) {
|
||||
var context SpanContext
|
||||
if value == "" {
|
||||
return emptyContext, errEmptyTracerStateString
|
||||
}
|
||||
parts := strings.Split(value, ":")
|
||||
if len(parts) != 4 {
|
||||
return emptyContext, errMalformedTracerStateString
|
||||
}
|
||||
var err error
|
||||
if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
|
||||
return emptyContext, err
|
||||
}
|
||||
if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
|
||||
return emptyContext, err
|
||||
}
|
||||
if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
|
||||
return emptyContext, err
|
||||
}
|
||||
flags, err := strconv.ParseUint(parts[3], 10, 8)
|
||||
if err != nil {
|
||||
return emptyContext, err
|
||||
}
|
||||
context.flags = byte(flags)
|
||||
return context, nil
|
||||
}
|
||||
|
||||
// TraceID returns the trace ID of this span context
|
||||
func (c SpanContext) TraceID() TraceID {
|
||||
return c.traceID
|
||||
}
|
||||
|
||||
// SpanID returns the span ID of this span context
|
||||
func (c SpanContext) SpanID() SpanID {
|
||||
return c.spanID
|
||||
}
|
||||
|
||||
// ParentID returns the parent span ID of this span context
|
||||
func (c SpanContext) ParentID() SpanID {
|
||||
return c.parentID
|
||||
}
|
||||
|
||||
// NewSpanContext creates a new instance of SpanContext
|
||||
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
|
||||
flags := byte(0)
|
||||
if sampled {
|
||||
flags = flagSampled
|
||||
}
|
||||
return SpanContext{
|
||||
traceID: traceID,
|
||||
spanID: spanID,
|
||||
parentID: parentID,
|
||||
flags: flags,
|
||||
baggage: baggage}
|
||||
}
|
||||
|
||||
// CopyFrom copies data from ctx into this context, including span identity and baggage.
|
||||
// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
|
||||
func (c *SpanContext) CopyFrom(ctx *SpanContext) {
|
||||
c.traceID = ctx.traceID
|
||||
c.spanID = ctx.spanID
|
||||
c.parentID = ctx.parentID
|
||||
c.flags = ctx.flags
|
||||
if l := len(ctx.baggage); l > 0 {
|
||||
c.baggage = make(map[string]string, l)
|
||||
for k, v := range ctx.baggage {
|
||||
c.baggage[k] = v
|
||||
}
|
||||
} else {
|
||||
c.baggage = nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBaggageItem creates a new context with an extra baggage item.
|
||||
func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
|
||||
var newBaggage map[string]string
|
||||
if c.baggage == nil {
|
||||
newBaggage = map[string]string{key: value}
|
||||
} else {
|
||||
newBaggage = make(map[string]string, len(c.baggage)+1)
|
||||
for k, v := range c.baggage {
|
||||
newBaggage[k] = v
|
||||
}
|
||||
newBaggage[key] = value
|
||||
}
|
||||
// Use positional parameters so the compiler will help catch new fields.
|
||||
return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
|
||||
}
|
||||
|
||||
// isDebugIDContainerOnly returns true when the instance of the context is only
|
||||
// used to return the debug/correlation ID from extract() method. This happens
|
||||
// in the situation when "jaeger-debug-id" header is passed in the carrier to
|
||||
// the extract() method, but the request otherwise has no span context in it.
|
||||
// Previously this would've returned opentracing.ErrSpanContextNotFound from the
|
||||
// extract method, but now it returns a dummy context with only debugID filled in.
|
||||
//
|
||||
// See JaegerDebugHeader in constants.go
|
||||
// See textMapPropagator#Extract
|
||||
func (c *SpanContext) isDebugIDContainerOnly() bool {
|
||||
return !c.traceID.IsValid() && c.debugID != ""
|
||||
}
|
||||
|
||||
// ------- TraceID -------
|
||||
|
||||
func (t TraceID) String() string {
|
||||
if t.High == 0 {
|
||||
return fmt.Sprintf("%x", t.Low)
|
||||
}
|
||||
return fmt.Sprintf("%x%016x", t.High, t.Low)
|
||||
}
|
||||
|
||||
// TraceIDFromString creates a TraceID from a hexadecimal string
|
||||
func TraceIDFromString(s string) (TraceID, error) {
|
||||
var hi, lo uint64
|
||||
var err error
|
||||
if len(s) > 32 {
|
||||
return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
|
||||
} else if len(s) > 16 {
|
||||
hiLen := len(s) - 16
|
||||
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
|
||||
return TraceID{}, err
|
||||
}
|
||||
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
|
||||
return TraceID{}, err
|
||||
}
|
||||
} else {
|
||||
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
|
||||
return TraceID{}, err
|
||||
}
|
||||
}
|
||||
return TraceID{High: hi, Low: lo}, nil
|
||||
}
|
||||
|
||||
// IsValid checks if the trace ID is valid, i.e. not zero.
|
||||
func (t TraceID) IsValid() bool {
|
||||
return t.High != 0 || t.Low != 0
|
||||
}
|
||||
|
||||
// ------- SpanID -------
|
||||
|
||||
func (s SpanID) String() string {
|
||||
return fmt.Sprintf("%x", uint64(s))
|
||||
}
|
||||
|
||||
// SpanIDFromString creates a SpanID from a hexadecimal string
|
||||
func SpanIDFromString(s string) (SpanID, error) {
|
||||
if len(s) > 16 {
|
||||
return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
|
||||
}
|
||||
id, err := strconv.ParseUint(s, 16, 64)
|
||||
if err != nil {
|
||||
return SpanID(0), err
|
||||
}
|
||||
return SpanID(id), nil
|
||||
}
|
|
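ContextFromString and SpanContext.String are inverse operations on the traceID:spanID:parentID:flags encoding. A quick sketch using only identifiers from the file above:

package main

import (
	"fmt"
	"log"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// traceID, spanID and parentID are hex; flags is parsed as a decimal byte.
	ctx, err := jaeger.ContextFromString("1f2e3d4c5b6a7988:0000000000000042:0:1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ctx.TraceID(), ctx.SpanID(), ctx.IsSampled()) // 1f2e3d4c5b6a7988 42 true
	fmt.Println(ctx.String())                                 // 1f2e3d4c5b6a7988:42:0:1 (leading zeros dropped)
}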
@@ -1,56 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
// ContribObserver can be registered with the Tracer to receive notifications
|
||||
// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
|
||||
type ContribObserver interface {
|
||||
// Create and return a span observer. Called when a span starts.
|
||||
// If the Observer is not interested in the given span, it must return (nil, false).
|
||||
// E.g.:
|
||||
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
|
||||
// var sp opentracing.Span
|
||||
// sso := opentracing.StartSpanOptions{}
|
||||
// if spanObserver, ok := Observer.OnStartSpan(sp, opName, sso); ok {
|
||||
// // we have a valid SpanObserver
|
||||
// }
|
||||
// ...
|
||||
// }
|
||||
OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
|
||||
}
|
||||
|
||||
// ContribSpanObserver is created by the Observer and receives notifications
|
||||
// about other Span events. This interface is meant to match
|
||||
// github.com/opentracing-contrib/go-observer, via duck typing, without
|
||||
// directly importing the go-observer package.
|
||||
type ContribSpanObserver interface {
|
||||
OnSetOperationName(operationName string)
|
||||
OnSetTag(key string, value interface{})
|
||||
OnFinish(options opentracing.FinishOptions)
|
||||
}
|
||||
|
||||
// wrapper observer for the old observers (see observer.go)
|
||||
type oldObserver struct {
|
||||
obs Observer
|
||||
}
|
||||
|
||||
func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
|
||||
spanObserver := o.obs.OnStartSpan(operationName, options)
|
||||
return spanObserver, spanObserver != nil
|
||||
}
|
|
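Because ContribObserver relies on duck typing, any type with the right method set can be registered through the config.ContribObserver option shown earlier. A minimal sketch (the logging observer itself is hypothetical):

package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// loggingObserver satisfies jaeger.ContribObserver.
type loggingObserver struct{}

func (loggingObserver) OnStartSpan(sp opentracing.Span, operationName string,
	options opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) {
	fmt.Println("span started:", operationName)
	return spanObserver{}, true
}

// spanObserver satisfies jaeger.ContribSpanObserver.
type spanObserver struct{}

func (spanObserver) OnSetOperationName(operationName string)    { fmt.Println("renamed:", operationName) }
func (spanObserver) OnSetTag(key string, value interface{})     { fmt.Println("tag:", key, value) }
func (spanObserver) OnFinish(options opentracing.FinishOptions) { fmt.Println("finished") }

func main() {
	// Normally passed to the tracer via config.ContribObserver(loggingObserver{}).
	var _ jaeger.ContribObserver = loggingObserver{}
}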
@@ -1,24 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
|
||||
It currently uses a Zipkin-compatible data model and can be directly
|
||||
integrated with a Zipkin backend (http://zipkin.io).
|
||||
|
||||
For integration instructions please refer to the README:
|
||||
|
||||
https://github.com/uber/jaeger-client-go/blob/master/README.md
|
||||
*/
|
||||
package jaeger
|
|
@@ -1,89 +0,0 @@
|
|||
hash: 3accf84f97bff4a91162736104c0e9b9790820712bd86db6fec5e665f7196a82
|
||||
updated: 2018-04-30T11:46:43.804556-04:00
|
||||
imports:
|
||||
- name: github.com/beorn7/perks
|
||||
version: 3a771d992973f24aa725d07868b467d1ddfceafb
|
||||
subpackages:
|
||||
- quantile
|
||||
- name: github.com/codahale/hdrhistogram
|
||||
version: 3a0bb77429bd3a61596f5e8a3172445844342120
|
||||
- name: github.com/crossdock/crossdock-go
|
||||
version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/golang/protobuf
|
||||
version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/matttproud/golang_protobuf_extensions
|
||||
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
subpackages:
|
||||
- pbutil
|
||||
- name: github.com/opentracing/opentracing-go
|
||||
version: 1949ddbfd147afd4d964a9f00b24eb291e0e7c38
|
||||
subpackages:
|
||||
- ext
|
||||
- log
|
||||
- name: github.com/pkg/errors
|
||||
version: 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||
- name: github.com/pmezard/go-difflib
|
||||
version: 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
subpackages:
|
||||
- difflib
|
||||
- name: github.com/prometheus/client_golang
|
||||
version: c5b7fccd204277076155f10851dad72b76a49317
|
||||
subpackages:
|
||||
- prometheus
|
||||
- name: github.com/prometheus/client_model
|
||||
version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a
|
||||
subpackages:
|
||||
- expfmt
|
||||
- internal/bitbucket.org/ww/goautoneg
|
||||
- model
|
||||
- name: github.com/prometheus/procfs
|
||||
version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea
|
||||
subpackages:
|
||||
- internal/util
|
||||
- nfs
|
||||
- xfs
|
||||
- name: github.com/stretchr/testify
|
||||
version: 12b6f73e6084dad08a7c6e575284b177ecafbc71
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
- suite
|
||||
- name: github.com/uber/jaeger-lib
|
||||
version: 4267858c0679cd4e47cefed8d7f70fd386cfb567
|
||||
subpackages:
|
||||
- metrics
|
||||
- metrics/prometheus
|
||||
- metrics/testutils
|
||||
- name: go.uber.org/atomic
|
||||
version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
|
||||
- name: go.uber.org/multierr
|
||||
version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
|
||||
- name: go.uber.org/zap
|
||||
version: eeedf312bc6c57391d84767a4cd413f02a917974
|
||||
subpackages:
|
||||
- buffer
|
||||
- internal/bufferpool
|
||||
- internal/color
|
||||
- internal/exit
|
||||
- zapcore
|
||||
- name: golang.org/x/net
|
||||
version: 6078986fec03a1dcc236c34816c71b0e05018fda
|
||||
subpackages:
|
||||
- context
|
||||
- context/ctxhttp
|
||||
testImports:
|
||||
- name: github.com/uber-go/atomic
|
||||
version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
|
|
@@ -1,22 +0,0 @@
|
|||
package: github.com/uber/jaeger-client-go
|
||||
import:
|
||||
- package: github.com/opentracing/opentracing-go
|
||||
version: ^1
|
||||
subpackages:
|
||||
- ext
|
||||
- log
|
||||
- package: github.com/crossdock/crossdock-go
|
||||
- package: github.com/uber/jaeger-lib
|
||||
version: ^1.2.1
|
||||
subpackages:
|
||||
- metrics
|
||||
- package: github.com/pkg/errors
|
||||
version: ~0.8.0
|
||||
testImport:
|
||||
- package: github.com/stretchr/testify
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
- suite
|
||||
- package: github.com/prometheus/client_golang
|
||||
version: v0.8.0
|
|
@@ -1,64 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
// HeadersConfig contains the values for the header keys that Jaeger will use.
|
||||
// These values may be either custom or default depending on whether custom
|
||||
// values were provided via a configuration.
|
||||
type HeadersConfig struct {
|
||||
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
|
||||
// if found in the carrier, forces the trace to be sampled as "debug" trace.
|
||||
// The value of the header is recorded as the tag on the root span, so that the
|
||||
// trace can be found in the UI using this value as a correlation ID.
|
||||
JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
|
||||
|
||||
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
|
||||
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
|
||||
// a root span does not exist.
|
||||
JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
|
||||
|
||||
// TraceContextHeaderName is the http header name used to propagate tracing context.
|
||||
// This must be in lower-case to avoid mismatches when decoding incoming headers.
|
||||
TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
|
||||
|
||||
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
|
||||
// This must be in lower-case to avoid mismatches when decoding incoming headers.
|
||||
TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
|
||||
}
|
||||
|
||||
func (c *HeadersConfig) applyDefaults() *HeadersConfig {
|
||||
if c.JaegerBaggageHeader == "" {
|
||||
c.JaegerBaggageHeader = JaegerBaggageHeader
|
||||
}
|
||||
if c.JaegerDebugHeader == "" {
|
||||
c.JaegerDebugHeader = JaegerDebugHeader
|
||||
}
|
||||
if c.TraceBaggageHeaderPrefix == "" {
|
||||
c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
|
||||
}
|
||||
if c.TraceContextHeaderName == "" {
|
||||
c.TraceContextHeaderName = TraceContextHeaderName
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func getDefaultHeadersConfig() *HeadersConfig {
|
||||
return &HeadersConfig{
|
||||
JaegerDebugHeader: JaegerDebugHeader,
|
||||
JaegerBaggageHeader: JaegerBaggageHeader,
|
||||
TraceContextHeaderName: TraceContextHeaderName,
|
||||
TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
|
||||
}
|
||||
}
|
|
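HeadersConfig lets a deployment rename the propagation headers, with applyDefaults backfilling anything left empty. A hedged sketch, assuming the config package's Configuration exposes a Headers field and ServiceName (neither is part of this hunk):

package main

import (
	"log"

	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := jaegercfg.Configuration{
		ServiceName: "demo-service", // hypothetical
		// Only the context header is overridden; the remaining fields fall back
		// to the package constants via applyDefaults.
		Headers: &jaeger.HeadersConfig{
			TraceContextHeaderName: "x-trace-id",
		},
	}
	_, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
}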
@@ -1,101 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxValueLength = 2048
|
||||
defaultRefreshInterval = time.Minute
|
||||
defaultHostPort = "localhost:5778"
|
||||
)
|
||||
|
||||
// Option is a function that sets some option on the RestrictionManager
|
||||
type Option func(options *options)
|
||||
|
||||
// Options is a factory for all available options
|
||||
var Options options
|
||||
|
||||
type options struct {
|
||||
denyBaggageOnInitializationFailure bool
|
||||
metrics *jaeger.Metrics
|
||||
logger jaeger.Logger
|
||||
hostPort string
|
||||
refreshInterval time.Duration
|
||||
}
|
||||
|
||||
// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
|
||||
// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
|
||||
// restrictions have been retrieved from agent.
|
||||
// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
|
||||
// restrictions have been retrieved from agent.
|
||||
func (options) DenyBaggageOnInitializationFailure(b bool) Option {
|
||||
return func(o *options) {
|
||||
o.denyBaggageOnInitializationFailure = b
|
||||
}
|
||||
}
|
||||
|
||||
// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
|
||||
func (options) Metrics(m *jaeger.Metrics) Option {
|
||||
return func(o *options) {
|
||||
o.metrics = m
|
||||
}
|
||||
}
|
||||
|
||||
// Logger creates an Option that sets the logger used by the RestrictionManager.
|
||||
func (options) Logger(logger jaeger.Logger) Option {
|
||||
return func(o *options) {
|
||||
o.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
|
||||
func (options) HostPort(hostPort string) Option {
|
||||
return func(o *options) {
|
||||
o.hostPort = hostPort
|
||||
}
|
||||
}
|
||||
|
||||
// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
|
||||
// the baggage restrictions.
|
||||
func (options) RefreshInterval(refreshInterval time.Duration) Option {
|
||||
return func(o *options) {
|
||||
o.refreshInterval = refreshInterval
|
||||
}
|
||||
}
|
||||
|
||||
func applyOptions(o ...Option) options {
|
||||
opts := options{}
|
||||
for _, option := range o {
|
||||
option(&opts)
|
||||
}
|
||||
if opts.metrics == nil {
|
||||
opts.metrics = jaeger.NewNullMetrics()
|
||||
}
|
||||
if opts.logger == nil {
|
||||
opts.logger = jaeger.NullLogger
|
||||
}
|
||||
if opts.hostPort == "" {
|
||||
opts.hostPort = defaultHostPort
|
||||
}
|
||||
if opts.refreshInterval == 0 {
|
||||
opts.refreshInterval = defaultRefreshInterval
|
||||
}
|
||||
return opts
|
||||
}
|
157  vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go  generated  vendored
|
@@ -1,157 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go/internal/baggage"
|
||||
thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
|
||||
"github.com/uber/jaeger-client-go/utils"
|
||||
)
|
||||
|
||||
type httpBaggageRestrictionManagerProxy struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
|
||||
v := url.Values{}
|
||||
v.Set("service", serviceName)
|
||||
return &httpBaggageRestrictionManagerProxy{
|
||||
url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
|
||||
var out []*thrift.BaggageRestriction
|
||||
if err := utils.GetJSON(s.url, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
|
||||
type RestrictionManager struct {
|
||||
options
|
||||
|
||||
mux sync.RWMutex
|
||||
serviceName string
|
||||
restrictions map[string]*baggage.Restriction
|
||||
thriftProxy thrift.BaggageRestrictionManager
|
||||
pollStopped sync.WaitGroup
|
||||
stopPoll chan struct{}
|
||||
invalidRestriction *baggage.Restriction
|
||||
validRestriction *baggage.Restriction
|
||||
|
||||
// Determines if the manager has successfully retrieved baggage restrictions from agent
|
||||
initialized bool
|
||||
}
|
||||
|
||||
// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
|
||||
// baggage restrictions.
|
||||
func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
|
||||
// TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
|
||||
// restrictionsMap will need to exist per service
|
||||
opts := applyOptions(options...)
|
||||
m := &RestrictionManager{
|
||||
serviceName: serviceName,
|
||||
options: opts,
|
||||
restrictions: make(map[string]*baggage.Restriction),
|
||||
thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
|
||||
stopPoll: make(chan struct{}),
|
||||
invalidRestriction: baggage.NewRestriction(false, 0),
|
||||
validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
|
||||
}
|
||||
m.pollStopped.Add(1)
|
||||
go m.pollManager()
|
||||
return m
|
||||
}
|
||||
|
||||
// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
|
||||
func (m *RestrictionManager) isReady() bool {
|
||||
m.mux.RLock()
|
||||
defer m.mux.RUnlock()
|
||||
return m.initialized
|
||||
}
|
||||
|
||||
// GetRestriction implements RestrictionManager#GetRestriction.
|
||||
func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
|
||||
m.mux.RLock()
|
||||
defer m.mux.RUnlock()
|
||||
if !m.initialized {
|
||||
if m.denyBaggageOnInitializationFailure {
|
||||
return m.invalidRestriction
|
||||
}
|
||||
return m.validRestriction
|
||||
}
|
||||
if restriction, ok := m.restrictions[key]; ok {
|
||||
return restriction
|
||||
}
|
||||
return m.invalidRestriction
|
||||
}
|
||||
|
||||
// Close stops remote polling and closes the RemoteRestrictionManager.
|
||||
func (m *RestrictionManager) Close() error {
|
||||
close(m.stopPoll)
|
||||
m.pollStopped.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RestrictionManager) pollManager() {
|
||||
defer m.pollStopped.Done()
|
||||
// attempt to initialize baggage restrictions
|
||||
if err := m.updateRestrictions(); err != nil {
|
||||
m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
|
||||
}
|
||||
ticker := time.NewTicker(m.refreshInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := m.updateRestrictions(); err != nil {
|
||||
m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
|
||||
}
|
||||
case <-m.stopPoll:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *RestrictionManager) updateRestrictions() error {
|
||||
restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
|
||||
if err != nil {
|
||||
m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
|
||||
return err
|
||||
}
|
||||
newRestrictions := m.parseRestrictions(restrictions)
|
||||
m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
|
||||
m.mux.Lock()
|
||||
defer m.mux.Unlock()
|
||||
m.initialized = true
|
||||
m.restrictions = newRestrictions
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
|
||||
setters := make(map[string]*baggage.Restriction, len(restrictions))
|
||||
for _, restriction := range restrictions {
|
||||
setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
|
||||
}
|
||||
return setters
|
||||
}
|
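The options from the previous file and NewRestrictionManager combine as in the sketch below. Since the package lives under internal/, this is illustrative only; the tracer wires it up through its baggage-restriction configuration rather than a direct import.

package main

import (
	"time"

	"github.com/uber/jaeger-client-go/internal/baggage/remote"
)

func main() {
	mgr := remote.NewRestrictionManager(
		"demo-service", // hypothetical service name
		remote.Options.HostPort("localhost:5778"),
		remote.Options.RefreshInterval(30*time.Second),
		remote.Options.DenyBaggageOnInitializationFailure(true),
	)
	defer mgr.Close()

	// Until the first poll succeeds, the manager answers with the invalid
	// restriction because deny-on-init-failure is enabled above.
	r := mgr.GetRestriction("demo-service", "user-id")
	_ = r.KeyAllowed()
}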
71  vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go  generated  vendored
|
@@ -1,71 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package baggage
|
||||
|
||||
const (
|
||||
defaultMaxValueLength = 2048
|
||||
)
|
||||
|
||||
// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
|
||||
type Restriction struct {
|
||||
keyAllowed bool
|
||||
maxValueLength int
|
||||
}
|
||||
|
||||
// NewRestriction returns a new Restriction.
|
||||
func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
|
||||
return &Restriction{
|
||||
keyAllowed: keyAllowed,
|
||||
maxValueLength: maxValueLength,
|
||||
}
|
||||
}
|
||||
|
||||
// KeyAllowed returns whether the baggage key for this restriction is allowed.
|
||||
func (r *Restriction) KeyAllowed() bool {
|
||||
return r.keyAllowed
|
||||
}
|
||||
|
||||
// MaxValueLength returns the max length for the baggage value.
|
||||
func (r *Restriction) MaxValueLength() int {
|
||||
return r.maxValueLength
|
||||
}
|
||||
|
||||
// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
|
||||
// will return a Restriction for a specific baggage key which will determine whether the baggage
|
||||
// key is allowed for the current service and any other applicable restrictions on the baggage
|
||||
// value.
|
||||
type RestrictionManager interface {
|
||||
GetRestriction(service, key string) *Restriction
|
||||
}
|
||||
|
||||
// DefaultRestrictionManager allows any baggage key.
|
||||
type DefaultRestrictionManager struct {
|
||||
defaultRestriction *Restriction
|
||||
}
|
||||
|
||||
// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
|
||||
func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
|
||||
if maxValueLength == 0 {
|
||||
maxValueLength = defaultMaxValueLength
|
||||
}
|
||||
return &DefaultRestrictionManager{
|
||||
defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
|
||||
}
|
||||
}
|
||||
|
||||
// GetRestriction implements RestrictionManager#GetRestriction.
|
||||
func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
|
||||
return m.defaultRestriction
|
||||
}
|
|
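DefaultRestrictionManager is the in-process fallback used when remote restrictions are not configured. Again illustrative only because of the internal/ path:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/internal/baggage"
)

func main() {
	mgr := baggage.NewDefaultRestrictionManager(0) // 0 falls back to the 2048-byte default
	r := mgr.GetRestriction("any-service", "any-key")
	fmt.Println(r.KeyAllowed(), r.MaxValueLength()) // true 2048
}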
@@ -1,81 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package spanlog
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
type fieldsAsMap map[string]string
|
||||
|
||||
// MaterializeWithJSON converts log Fields into JSON string
|
||||
// TODO refactor into pluggable materializer
|
||||
func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
|
||||
fields := fieldsAsMap(make(map[string]string, len(logFields)))
|
||||
for _, field := range logFields {
|
||||
field.Marshal(fields)
|
||||
}
|
||||
if event, ok := fields["event"]; ok && len(fields) == 1 {
|
||||
return []byte(event), nil
|
||||
}
|
||||
return json.Marshal(fields)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitString(key, value string) {
|
||||
ml[key] = value
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitBool(key string, value bool) {
|
||||
ml[key] = fmt.Sprintf("%t", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitInt(key string, value int) {
|
||||
ml[key] = fmt.Sprintf("%d", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitInt32(key string, value int32) {
|
||||
ml[key] = fmt.Sprintf("%d", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitInt64(key string, value int64) {
|
||||
ml[key] = fmt.Sprintf("%d", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
|
||||
ml[key] = fmt.Sprintf("%d", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
|
||||
ml[key] = fmt.Sprintf("%d", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
|
||||
ml[key] = fmt.Sprintf("%f", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
|
||||
ml[key] = fmt.Sprintf("%f", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
|
||||
ml[key] = fmt.Sprintf("%+v", value)
|
||||
}
|
||||
|
||||
func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
|
||||
value(ml)
|
||||
}
|
|
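MaterializeWithJSON flattens OpenTracing log fields into a JSON object (or the bare event string when that is the only field). A sketch, assuming the package path internal/spanlog, which this hunk does not show:

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"

	"github.com/uber/jaeger-client-go/internal/spanlog"
)

func main() {
	out, err := spanlog.MaterializeWithJSON([]log.Field{
		log.String("event", "cache-miss"),
		log.Int("attempt", 3),
	})
	fmt.Println(string(out), err) // {"attempt":"3","event":"cache-miss"} <nil>
}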
@@ -1,99 +0,0 @@
|
|||
// Copyright (c) 2018 The Jaeger Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHostPort = "localhost:5778"
|
||||
defaultRefreshInterval = time.Second * 5
|
||||
)
|
||||
|
||||
// Option is a function that sets some option on the Throttler
|
||||
type Option func(options *options)
|
||||
|
||||
// Options is a factory for all available options
|
||||
var Options options
|
||||
|
||||
type options struct {
|
||||
metrics *jaeger.Metrics
|
||||
logger jaeger.Logger
|
||||
hostPort string
|
||||
refreshInterval time.Duration
|
||||
synchronousInitialization bool
|
||||
}
|
||||
|
||||
// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
|
||||
func (options) Metrics(m *jaeger.Metrics) Option {
|
||||
return func(o *options) {
|
||||
o.metrics = m
|
||||
}
|
||||
}
|
||||
|
||||
// Logger creates an Option that sets the logger used by the Throttler.
|
||||
func (options) Logger(logger jaeger.Logger) Option {
|
||||
return func(o *options) {
|
||||
o.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
|
||||
func (options) HostPort(hostPort string) Option {
|
||||
return func(o *options) {
|
||||
o.hostPort = hostPort
|
||||
}
|
||||
}
|
||||
|
||||
// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
|
||||
// credits.
|
||||
func (options) RefreshInterval(refreshInterval time.Duration) Option {
|
||||
return func(o *options) {
|
||||
o.refreshInterval = refreshInterval
|
||||
}
|
||||
}
|
||||
|
||||
// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
|
||||
// fetch credits from the agent when an operation is seen for the first time. This should be set to true
|
||||
// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
|
||||
// such that sampling or throttling occurs.
|
||||
func (options) SynchronousInitialization(b bool) Option {
|
||||
return func(o *options) {
|
||||
o.synchronousInitialization = b
|
||||
}
|
||||
}
|
||||
|
||||
func applyOptions(o ...Option) options {
|
||||
opts := options{}
|
||||
for _, option := range o {
|
||||
option(&opts)
|
||||
}
|
||||
if opts.metrics == nil {
|
||||
opts.metrics = jaeger.NewNullMetrics()
|
||||
}
|
||||
if opts.logger == nil {
|
||||
opts.logger = jaeger.NullLogger
|
||||
}
|
||||
if opts.hostPort == "" {
|
||||
opts.hostPort = defaultHostPort
|
||||
}
|
||||
if opts.refreshInterval == 0 {
|
||||
opts.refreshInterval = defaultRefreshInterval
|
||||
}
|
||||
return opts
|
||||
}
|
216  vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go  generated  vendored
|
@@ -1,216 +0,0 @@
|
|||
// Copyright (c) 2018 The Jaeger Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/uber/jaeger-client-go"
|
||||
"github.com/uber/jaeger-client-go/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// minimumCredits is the minimum amount of credits necessary to not be throttled.
|
||||
// i.e. if currentCredits > minimumCredits, then the operation will not be throttled.
|
||||
minimumCredits = 1.0
|
||||
)
|
||||
|
||||
var (
|
||||
errorUUIDNotSet = errors.New("Throttler UUID must be set")
|
||||
)
|
||||
|
||||
type operationBalance struct {
|
||||
Operation string `json:"operation"`
|
||||
Balance float64 `json:"balance"`
|
||||
}
|
||||
|
||||
type creditResponse struct {
|
||||
Balances []operationBalance `json:"balances"`
|
||||
}
|
||||
|
||||
type httpCreditManagerProxy struct {
|
||||
hostPort string
|
||||
}
|
||||
|
||||
func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
|
||||
return &httpCreditManagerProxy{
|
||||
hostPort: hostPort,
|
||||
}
|
||||
}
|
||||
|
||||
// N.B. Operations list must not be empty.
|
||||
func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
|
||||
params := url.Values{}
|
||||
params.Set("service", serviceName)
|
||||
params.Set("uuid", uuid)
|
||||
for _, op := range operations {
|
||||
params.Add("operations", op)
|
||||
}
|
||||
var resp creditResponse
|
||||
if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to receive credits from agent")
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Throttler retrieves credits from agent and uses it to throttle operations.
|
||||
type Throttler struct {
|
||||
options
|
||||
|
||||
mux sync.RWMutex
|
||||
service string
|
||||
uuid atomic.Value
|
||||
creditManager *httpCreditManagerProxy
|
||||
credits map[string]float64 // map of operation->credits
|
||||
close chan struct{}
|
||||
stopped sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
|
||||
// the service.
|
||||
func NewThrottler(service string, options ...Option) *Throttler {
|
||||
opts := applyOptions(options...)
|
||||
creditManager := newHTTPCreditManagerProxy(opts.hostPort)
|
||||
t := &Throttler{
|
||||
options: opts,
|
||||
creditManager: creditManager,
|
||||
service: service,
|
||||
credits: make(map[string]float64),
|
||||
close: make(chan struct{}),
|
||||
}
|
||||
t.stopped.Add(1)
|
||||
go t.pollManager()
|
||||
return t
|
||||
}
|
||||
|
||||
// IsAllowed implements Throttler#IsAllowed.
|
||||
func (t *Throttler) IsAllowed(operation string) bool {
|
||||
t.mux.Lock()
|
||||
defer t.mux.Unlock()
|
||||
value, ok := t.credits[operation]
|
||||
if !ok || value == 0 {
|
||||
if !ok {
|
||||
// NOTE: This appears to be a no-op at first glance, but it stores
|
||||
// the operation key in the map. Necessary for functionality of
|
||||
// Throttler#operations method.
|
||||
t.credits[operation] = 0
|
||||
}
|
||||
if !t.synchronousInitialization {
|
||||
t.metrics.ThrottledDebugSpans.Inc(1)
|
||||
return false
|
||||
}
|
||||
// If it is the first time this operation is being checked, synchronously fetch
|
||||
// the credits.
|
||||
credits, err := t.fetchCredits([]string{operation})
|
||||
if err != nil {
|
||||
// Failed to receive credits from agent, try again next time
|
||||
t.logger.Error("Failed to fetch credits: " + err.Error())
|
||||
return false
|
||||
}
|
||||
if len(credits.Balances) == 0 {
|
||||
// This shouldn't happen but just in case
|
||||
return false
|
||||
}
|
||||
for _, opBalance := range credits.Balances {
|
||||
t.credits[opBalance.Operation] += opBalance.Balance
|
||||
}
|
||||
}
|
||||
return t.isAllowed(operation)
|
||||
}
|
||||
|
||||
// Close stops the throttler from fetching credits from remote.
|
||||
func (t *Throttler) Close() error {
|
||||
close(t.close)
|
||||
t.stopped.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
|
||||
// requests are made.
|
||||
func (t *Throttler) SetProcess(process jaeger.Process) {
|
||||
if process.UUID != "" {
|
||||
t.uuid.Store(process.UUID)
|
||||
}
|
||||
}
|
||||
|
||||
// N.B. This function must be called with the Write Lock
|
||||
func (t *Throttler) isAllowed(operation string) bool {
|
||||
credits := t.credits[operation]
|
||||
if credits < minimumCredits {
|
||||
t.metrics.ThrottledDebugSpans.Inc(1)
|
||||
return false
|
||||
}
|
||||
t.credits[operation] = credits - minimumCredits
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Throttler) pollManager() {
|
||||
defer t.stopped.Done()
|
||||
ticker := time.NewTicker(t.refreshInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
t.refreshCredits()
|
||||
case <-t.close:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Throttler) operations() []string {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
operations := make([]string, 0, len(t.credits))
|
||||
for op := range t.credits {
|
||||
operations = append(operations, op)
|
||||
}
|
||||
return operations
|
||||
}
|
||||
|
||||
func (t *Throttler) refreshCredits() {
|
||||
operations := t.operations()
|
||||
if len(operations) == 0 {
|
||||
return
|
||||
}
|
||||
newCredits, err := t.fetchCredits(operations)
|
||||
if err != nil {
|
||||
t.metrics.ThrottlerUpdateFailure.Inc(1)
|
||||
t.logger.Error("Failed to fetch credits: " + err.Error())
|
||||
return
|
||||
}
|
||||
t.metrics.ThrottlerUpdateSuccess.Inc(1)
|
||||
|
||||
t.mux.Lock()
|
||||
defer t.mux.Unlock()
|
||||
for _, opBalance := range newCredits.Balances {
|
||||
t.credits[opBalance.Operation] += opBalance.Balance
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
|
||||
uuid := t.uuid.Load()
|
||||
uuidStr, _ := uuid.(string)
|
||||
if uuid == nil || uuidStr == "" {
|
||||
return nil, errorUUIDNotSet
|
||||
}
|
||||
return t.creditManager.FetchCredits(uuidStr, t.service, operations)
|
||||
}
|
|
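The remote Throttler rate-limits debug spans with credits polled from the agent, and it refuses to fetch anything until SetProcess has supplied a UUID. An illustrative sketch (internal package; the service name and UUID are hypothetical):

package main

import (
	"fmt"
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
)

func main() {
	t := throttler.NewThrottler(
		"demo-service",
		throttler.Options.HostPort("localhost:5778"),
		throttler.Options.RefreshInterval(10*time.Second),
		throttler.Options.SynchronousInitialization(true),
	)
	defer t.Close()

	// Without a UUID, fetchCredits returns errorUUIDNotSet and nothing is allowed.
	t.SetProcess(jaeger.Process{UUID: "00000000-0000-0000-0000-000000000000"})

	fmt.Println(t.IsAllowed("GET /users")) // false until credits actually arrive from an agent
}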
@@ -1,32 +0,0 @@
|
|||
// Copyright (c) 2018 The Jaeger Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package throttler
|
||||
|
||||
// Throttler is used to rate limit operations. For example, since debug spans
|
||||
// are always sampled, a throttler can be enabled per client to rate limit the number
|
||||
// of debug spans a client can start.
|
||||
type Throttler interface {
|
||||
// IsAllowed determines whether the operation should be allowed and not be
|
||||
// throttled.
|
||||
IsAllowed(operation string) bool
|
||||
}
|
||||
|
||||
// DefaultThrottler doesn't throttle at all.
|
||||
type DefaultThrottler struct{}
|
||||
|
||||
// IsAllowed implements Throttler#IsAllowed.
|
||||
func (t DefaultThrottler) IsAllowed(operation string) bool {
|
||||
return true
|
||||
}
|
|
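Because the interface above has a single IsAllowed method, callers can swap in their own policy. A hedged sketch of a fixed-budget implementation follows; the interface is re-declared locally so the snippet stands alone, and the budget numbers are invented.

package main

import "fmt"

// Throttler mirrors the one-method interface from the deleted file.
type Throttler interface {
    IsAllowed(operation string) bool
}

// budgetThrottler allows a fixed number of calls per operation, then refuses.
type budgetThrottler struct {
    budget int
    seen   map[string]int
}

func (b *budgetThrottler) IsAllowed(operation string) bool {
    if b.seen[operation] >= b.budget {
        return false
    }
    b.seen[operation]++
    return true
}

func main() {
    var t Throttler = &budgetThrottler{budget: 2, seen: map[string]int{}}
    fmt.Println(t.IsAllowed("op"), t.IsAllowed("op"), t.IsAllowed("op")) // true true false
}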
@@ -1,55 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "github.com/opentracing/opentracing-go"
)

// TODO this file should not be needed after TChannel PR.

type formatKey int

// SpanContextFormat is a constant used as OpenTracing Format.
// Requires *SpanContext as carrier.
// This format is intended for interop with TChannel or other Zipkin-like tracers.
const SpanContextFormat formatKey = iota

type jaegerTraceContextPropagator struct {
    tracer *Tracer
}

func (p *jaegerTraceContextPropagator) Inject(
    ctx SpanContext,
    abstractCarrier interface{},
) error {
    carrier, ok := abstractCarrier.(*SpanContext)
    if !ok {
        return opentracing.ErrInvalidCarrier
    }

    carrier.CopyFrom(&ctx)
    return nil
}

func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
    carrier, ok := abstractCarrier.(*SpanContext)
    if !ok {
        return emptyContext, opentracing.ErrInvalidCarrier
    }
    ctx := new(SpanContext)
    ctx.CopyFrom(carrier)
    return *ctx, nil
}
@@ -1,84 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "fmt"

    "github.com/opentracing/opentracing-go/log"

    j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

type tags []*j.Tag

// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
    fields := tags(make([]*j.Tag, 0, len(logFields)))
    for _, field := range logFields {
        field.Marshal(&fields)
    }
    return fields
}

func (t *tags) EmitString(key, value string) {
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
}

func (t *tags) EmitBool(key string, value bool) {
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
}

func (t *tags) EmitInt(key string, value int) {
    vLong := int64(value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}

func (t *tags) EmitInt32(key string, value int32) {
    vLong := int64(value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}

func (t *tags) EmitInt64(key string, value int64) {
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
}

func (t *tags) EmitUint32(key string, value uint32) {
    vLong := int64(value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}

func (t *tags) EmitUint64(key string, value uint64) {
    vLong := int64(value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}

func (t *tags) EmitFloat32(key string, value float32) {
    vDouble := float64(value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
}

func (t *tags) EmitFloat64(key string, value float64) {
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
}

func (t *tags) EmitObject(key string, value interface{}) {
    vStr := fmt.Sprintf("%+v", value)
    *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
}

func (t *tags) EmitLazyLogger(value log.LazyLogger) {
    value(t)
}
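The file above is the bridge between opentracing log fields and Jaeger's Thrift Tag type: marshalling each field calls back into the matching Emit* method. A usage sketch, assuming the pre-removal vendored import paths still resolve; the field names and values are examples.

package main

import (
    "fmt"

    "github.com/opentracing/opentracing-go/log"
    "github.com/uber/jaeger-client-go"
)

func main() {
    fields := []log.Field{
        log.String("event", "cache miss"),
        log.Int64("attempt", 3),
        log.Bool("retryable", true),
    }
    // Each field is marshalled through the tags.Emit* callbacks shown above.
    for _, tag := range jaeger.ConvertLogsToJaegerTags(fields) {
        fmt.Printf("%s -> %v\n", tag.Key, tag.VType)
    }
}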
@@ -1,179 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "time"

    "github.com/opentracing/opentracing-go"

    j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
    "github.com/uber/jaeger-client-go/utils"
)

// BuildJaegerThrift builds jaeger span based on internal span.
func BuildJaegerThrift(span *Span) *j.Span {
    span.Lock()
    defer span.Unlock()
    startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
    duration := span.duration.Nanoseconds() / int64(time.Microsecond)
    jaegerSpan := &j.Span{
        TraceIdLow:    int64(span.context.traceID.Low),
        TraceIdHigh:   int64(span.context.traceID.High),
        SpanId:        int64(span.context.spanID),
        ParentSpanId:  int64(span.context.parentID),
        OperationName: span.operationName,
        Flags:         int32(span.context.flags),
        StartTime:     startTime,
        Duration:      duration,
        Tags:          buildTags(span.tags, span.tracer.options.maxTagValueLength),
        Logs:          buildLogs(span.logs),
        References:    buildReferences(span.references),
    }
    return jaegerSpan
}

// BuildJaegerProcessThrift creates a thrift Process type.
func BuildJaegerProcessThrift(span *Span) *j.Process {
    span.Lock()
    defer span.Unlock()
    return buildJaegerProcessThrift(span.tracer)
}

func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
    process := &j.Process{
        ServiceName: tracer.serviceName,
        Tags:        buildTags(tracer.tags, tracer.options.maxTagValueLength),
    }
    if tracer.process.UUID != "" {
        process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
    }
    return process
}

func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
    jTags := make([]*j.Tag, 0, len(tags))
    for _, tag := range tags {
        jTag := buildTag(&tag, maxTagValueLength)
        jTags = append(jTags, jTag)
    }
    return jTags
}

func buildLogs(logs []opentracing.LogRecord) []*j.Log {
    jLogs := make([]*j.Log, 0, len(logs))
    for _, log := range logs {
        jLog := &j.Log{
            Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
            Fields:    ConvertLogsToJaegerTags(log.Fields),
        }
        jLogs = append(jLogs, jLog)
    }
    return jLogs
}

func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
    jTag := &j.Tag{Key: tag.key}
    switch value := tag.value.(type) {
    case string:
        vStr := truncateString(value, maxTagValueLength)
        jTag.VStr = &vStr
        jTag.VType = j.TagType_STRING
    case []byte:
        if len(value) > maxTagValueLength {
            value = value[:maxTagValueLength]
        }
        jTag.VBinary = value
        jTag.VType = j.TagType_BINARY
    case int:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case uint:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case int8:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case uint8:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case int16:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case uint16:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case int32:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case uint32:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case int64:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case uint64:
        vLong := int64(value)
        jTag.VLong = &vLong
        jTag.VType = j.TagType_LONG
    case float32:
        vDouble := float64(value)
        jTag.VDouble = &vDouble
        jTag.VType = j.TagType_DOUBLE
    case float64:
        vDouble := float64(value)
        jTag.VDouble = &vDouble
        jTag.VType = j.TagType_DOUBLE
    case bool:
        vBool := value
        jTag.VBool = &vBool
        jTag.VType = j.TagType_BOOL
    default:
        vStr := truncateString(stringify(value), maxTagValueLength)
        jTag.VStr = &vStr
        jTag.VType = j.TagType_STRING
    }
    return jTag
}

func buildReferences(references []Reference) []*j.SpanRef {
    retMe := make([]*j.SpanRef, 0, len(references))
    for _, ref := range references {
        if ref.Type == opentracing.ChildOfRef {
            retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
        } else if ref.Type == opentracing.FollowsFromRef {
            retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
        }
    }
    return retMe
}

func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
    return &j.SpanRef{
        RefType:     refType,
        TraceIdLow:  int64(ctx.traceID.Low),
        TraceIdHigh: int64(ctx.traceID.High),
        SpanId:      int64(ctx.spanID),
    }
}
@@ -1,90 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package log

import (
    "bytes"
    "fmt"
    "log"
    "sync"
)

// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
    // Error logs a message at error priority
    Error(msg string)

    // Infof logs a message at info priority
    Infof(msg string, args ...interface{})
}

// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
var StdLogger = &stdLogger{}

type stdLogger struct{}

func (l *stdLogger) Error(msg string) {
    log.Printf("ERROR: %s", msg)
}

// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
    log.Printf(msg, args...)
}

// NullLogger is an implementation of the Logger interface that is a no-op
var NullLogger = &nullLogger{}

type nullLogger struct{}

func (l *nullLogger) Error(msg string)                       {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}

// BytesBufferLogger implements Logger backed by a bytes.Buffer.
type BytesBufferLogger struct {
    mux sync.Mutex
    buf bytes.Buffer
}

// Error implements Logger.
func (l *BytesBufferLogger) Error(msg string) {
    l.mux.Lock()
    l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
    l.mux.Unlock()
}

// Infof implements Logger.
func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
    l.mux.Lock()
    l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
    l.mux.Unlock()
}

// String returns string representation of the underlying buffer.
func (l *BytesBufferLogger) String() string {
    l.mux.Lock()
    defer l.mux.Unlock()
    return l.buf.String()
}

// Flush empties the underlying buffer.
func (l *BytesBufferLogger) Flush() {
    l.mux.Lock()
    defer l.mux.Unlock()
    l.buf.Reset()
}
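Since the Logger interface above only needs Error and Infof, adapting any logging library takes a few lines. A sketch that routes the client's messages through a prefixed stdlib logger; the prefix and flags are arbitrary choices.

package main

import (
    stdlog "log"
    "os"
)

// prefixedLogger satisfies the two-method Logger interface shown above
// (Error and Infof) by delegating to a dedicated stdlib logger.
type prefixedLogger struct {
    l *stdlog.Logger
}

func (p prefixedLogger) Error(msg string) {
    p.l.Printf("ERROR: %s", msg)
}

func (p prefixedLogger) Infof(msg string, args ...interface{}) {
    p.l.Printf("INFO: "+msg, args...)
}

func main() {
    logger := prefixedLogger{l: stdlog.New(os.Stderr, "jaeger: ", stdlog.LstdFlags)}
    logger.Infof("flushed %d spans", 42)
    logger.Error("agent unreachable")
}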
@@ -1,53 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import "log"

// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.

// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
    // Error logs a message at error priority
    Error(msg string)

    // Infof logs a message at info priority
    Infof(msg string, args ...interface{})
}

// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
var StdLogger = &stdLogger{}

type stdLogger struct{}

func (l *stdLogger) Error(msg string) {
    log.Printf("ERROR: %s", msg)
}

// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
    log.Printf(msg, args...)
}

// NullLogger is an implementation of the Logger interface that is a no-op
var NullLogger = &nullLogger{}

type nullLogger struct{}

func (l *nullLogger) Error(msg string)                       {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}
@@ -1,107 +0,0 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "github.com/uber/jaeger-lib/metrics"
)

// Metrics is a container of all stats emitted by Jaeger tracer.
type Metrics struct {
    // Number of traces started by this tracer as sampled
    TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y"`

    // Number of traces started by this tracer as not sampled
    TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n"`

    // Number of externally started sampled traces this tracer joined
    TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y"`

    // Number of externally started not-sampled traces this tracer joined
    TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"`

    // Number of sampled spans started by this tracer
    SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"`

    // Number of unsampled spans started by this tracer
    SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"`

    // Number of spans finished by this tracer
    SpansFinished metrics.Counter `metric:"finished_spans"`

    // Number of errors decoding tracing context
    DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"`

    // Number of spans successfully reported
    ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"`

    // Number of spans not reported due to a Sender failure
    ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"`

    // Number of spans dropped due to internal queue overflow
    ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"`

    // Current number of spans in the reporter queue
    ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"`

    // Number of times the Sampler succeeded to retrieve sampling strategy
    SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"`

    // Number of times the Sampler failed to retrieve sampling strategy
    SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"`

    // Number of times the Sampler succeeded to retrieve and update sampling strategy
    SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"`

    // Number of times the Sampler failed to update sampling strategy
    SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"`

    // Number of times baggage was successfully written or updated on spans.
    BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"`

    // Number of times baggage failed to write or update on spans.
    BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"`

    // Number of times baggage was truncated as per baggage restrictions.
    BaggageTruncate metrics.Counter `metric:"baggage_truncations"`

    // Number of times baggage restrictions were successfully updated.
    BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"`

    // Number of times baggage restrictions failed to update.
    BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"`

    // Number of times debug spans were throttled.
    ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans"`

    // Number of times throttler successfully updated.
    ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok"`

    // Number of times throttler failed to update.
    ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err"`
}

// NewMetrics creates a new Metrics struct and initializes it.
func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
    m := &Metrics{}
    // TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense)
    metrics.Init(m, factory.Namespace("jaeger", nil), globalTags)
    return m
}

// NewNullMetrics creates a new Metrics struct that won't report any metrics.
func NewNullMetrics() *Metrics {
    return NewMetrics(metrics.NullFactory, nil)
}
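NewMetrics wires every tagged field of the struct above to a counter, gauge, or timer obtained from a jaeger-lib metrics.Factory. A minimal sketch using the NullFactory that the file itself references (swap in a real factory to actually export the numbers); the global tag is an example.

package main

import (
    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-lib/metrics"
)

func main() {
    // All metric names end up under the "jaeger" namespace, e.g. jaeger.finished_spans.
    m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})
    m.SpansFinished.Inc(1)
    m.ReporterQueueLength.Update(0)

    // In tests, NewNullMetrics() gives the same struct backed by no-op metrics.
    _ = jaeger.NewNullMetrics()
}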
@@ -1,88 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import opentracing "github.com/opentracing/opentracing-go"

// Observer can be registered with the Tracer to receive notifications about
// new Spans.
//
// Deprecated: use jaeger.ContribObserver instead.
type Observer interface {
    OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
}

// SpanObserver is created by the Observer and receives notifications about
// other Span events.
//
// Deprecated: use jaeger.ContribSpanObserver instead.
type SpanObserver interface {
    OnSetOperationName(operationName string)
    OnSetTag(key string, value interface{})
    OnFinish(options opentracing.FinishOptions)
}

// compositeObserver is a dispatcher to other observers
type compositeObserver struct {
    observers []ContribObserver
}

// compositeSpanObserver is a dispatcher to other span observers
type compositeSpanObserver struct {
    observers []ContribSpanObserver
}

// noopSpanObserver is used when there are no observers registered
// on the Tracer or none of them returns span observers from OnStartSpan.
var noopSpanObserver = &compositeSpanObserver{}

func (o *compositeObserver) append(contribObserver ContribObserver) {
    o.observers = append(o.observers, contribObserver)
}

func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
    var spanObservers []ContribSpanObserver
    for _, obs := range o.observers {
        spanObs, ok := obs.OnStartSpan(sp, operationName, options)
        if ok {
            if spanObservers == nil {
                spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
            }
            spanObservers = append(spanObservers, spanObs)
        }
    }
    if len(spanObservers) == 0 {
        return noopSpanObserver
    }
    return &compositeSpanObserver{observers: spanObservers}
}

func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
    for _, obs := range o.observers {
        obs.OnSetOperationName(operationName)
    }
}

func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
    for _, obs := range o.observers {
        obs.OnSetTag(key, value)
    }
}

func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
    for _, obs := range o.observers {
        obs.OnFinish(options)
    }
}
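The deprecated Observer/SpanObserver pair above is just a callback surface; an implementation only has to return something that reacts to span lifecycle events. A hedged sketch of a span-counting observer follows; the counting logic is invented, and registering it with the tracer (done through tracer options) is not shown here.

package main

import (
    "fmt"
    "sync/atomic"

    opentracing "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go"
)

// countingObserver implements the (deprecated) jaeger.Observer interface
// shown above and simply counts span lifecycle events.
type countingObserver struct {
    started, finished int64
}

func (o *countingObserver) OnStartSpan(operationName string, options opentracing.StartSpanOptions) jaeger.SpanObserver {
    atomic.AddInt64(&o.started, 1)
    return &countingSpanObserver{parent: o}
}

// countingSpanObserver implements jaeger.SpanObserver for a single span.
type countingSpanObserver struct {
    parent *countingObserver
}

func (s *countingSpanObserver) OnSetOperationName(operationName string) {}
func (s *countingSpanObserver) OnSetTag(key string, value interface{})  {}

func (s *countingSpanObserver) OnFinish(options opentracing.FinishOptions) {
    atomic.AddInt64(&s.parent.finished, 1)
}

func main() {
    obs := &countingObserver{}
    span := obs.OnStartSpan("GET /users", opentracing.StartSpanOptions{})
    span.OnFinish(opentracing.FinishOptions{})
    fmt.Println(atomic.LoadInt64(&obs.started), atomic.LoadInt64(&obs.finished)) // 1 1
}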
@@ -1,29 +0,0 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

// Process holds process specific metadata that's relevant to this client.
type Process struct {
    Service string
    UUID    string
    Tags    []Tag
}

// ProcessSetter sets a process. This can be used by any class that requires
// the process to be set as part of initialization.
// See internal/throttler/remote/throttler.go for an example.
type ProcessSetter interface {
    SetProcess(process Process)
}
@ -1,300 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
// Injector is responsible for injecting SpanContext instances in a manner suitable
|
||||
// for propagation via a format-specific "carrier" object. Typically the
|
||||
// injection will take place across an RPC boundary, but message queues and
|
||||
// other IPC mechanisms are also reasonable places to use an Injector.
|
||||
type Injector interface {
|
||||
// Inject takes `SpanContext` and injects it into `carrier`. The actual type
|
||||
// of `carrier` depends on the `format` passed to `Tracer.Inject()`.
|
||||
//
|
||||
// Implementations may return opentracing.ErrInvalidCarrier or any other
|
||||
// implementation-specific error if injection fails.
|
||||
Inject(ctx SpanContext, carrier interface{}) error
|
||||
}
|
||||
|
||||
// Extractor is responsible for extracting SpanContext instances from a
|
||||
// format-specific "carrier" object. Typically the extraction will take place
|
||||
// on the server side of an RPC boundary, but message queues and other IPC
|
||||
// mechanisms are also reasonable places to use an Extractor.
|
||||
type Extractor interface {
|
||||
// Extract decodes a SpanContext instance from the given `carrier`,
|
||||
// or (nil, opentracing.ErrSpanContextNotFound) if no context could
|
||||
// be found in the `carrier`.
|
||||
Extract(carrier interface{}) (SpanContext, error)
|
||||
}
|
||||
|
||||
type textMapPropagator struct {
|
||||
headerKeys *HeadersConfig
|
||||
metrics Metrics
|
||||
encodeValue func(string) string
|
||||
decodeValue func(string) string
|
||||
}
|
||||
|
||||
func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
|
||||
return &textMapPropagator{
|
||||
headerKeys: headerKeys,
|
||||
metrics: metrics,
|
||||
encodeValue: func(val string) string {
|
||||
return val
|
||||
},
|
||||
decodeValue: func(val string) string {
|
||||
return val
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
|
||||
return &textMapPropagator{
|
||||
headerKeys: headerKeys,
|
||||
metrics: metrics,
|
||||
encodeValue: func(val string) string {
|
||||
return url.QueryEscape(val)
|
||||
},
|
||||
decodeValue: func(val string) string {
|
||||
// ignore decoding errors, cannot do anything about them
|
||||
if v, err := url.QueryUnescape(val); err == nil {
|
||||
return v
|
||||
}
|
||||
return val
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type binaryPropagator struct {
|
||||
tracer *Tracer
|
||||
buffers sync.Pool
|
||||
}
|
||||
|
||||
func newBinaryPropagator(tracer *Tracer) *binaryPropagator {
|
||||
return &binaryPropagator{
|
||||
tracer: tracer,
|
||||
buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textMapPropagator) Inject(
|
||||
sc SpanContext,
|
||||
abstractCarrier interface{},
|
||||
) error {
|
||||
textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
// Do not encode the string with trace context to avoid accidental double-encoding
|
||||
// if people are using opentracing < 0.10.0. Our colon-separated representation
|
||||
// of the trace context is already safe for HTTP headers.
|
||||
textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
|
||||
for k, v := range sc.baggage {
|
||||
safeKey := p.addBaggageKeyPrefix(k)
|
||||
safeVal := p.encodeValue(v)
|
||||
textMapWriter.Set(safeKey, safeVal)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
|
||||
textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
|
||||
if !ok {
|
||||
return emptyContext, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
var ctx SpanContext
|
||||
var baggage map[string]string
|
||||
err := textMapReader.ForeachKey(func(rawKey, value string) error {
|
||||
key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
|
||||
if key == p.headerKeys.TraceContextHeaderName {
|
||||
var err error
|
||||
safeVal := p.decodeValue(value)
|
||||
if ctx, err = ContextFromString(safeVal); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if key == p.headerKeys.JaegerDebugHeader {
|
||||
ctx.debugID = p.decodeValue(value)
|
||||
} else if key == p.headerKeys.JaegerBaggageHeader {
|
||||
if baggage == nil {
|
||||
baggage = make(map[string]string)
|
||||
}
|
||||
for k, v := range p.parseCommaSeparatedMap(value) {
|
||||
baggage[k] = v
|
||||
}
|
||||
} else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
|
||||
if baggage == nil {
|
||||
baggage = make(map[string]string)
|
||||
}
|
||||
safeKey := p.removeBaggageKeyPrefix(key)
|
||||
safeVal := p.decodeValue(value)
|
||||
baggage[safeKey] = safeVal
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
p.metrics.DecodingErrors.Inc(1)
|
||||
return emptyContext, err
|
||||
}
|
||||
if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
|
||||
return emptyContext, opentracing.ErrSpanContextNotFound
|
||||
}
|
||||
ctx.baggage = baggage
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func (p *binaryPropagator) Inject(
|
||||
sc SpanContext,
|
||||
abstractCarrier interface{},
|
||||
) error {
|
||||
carrier, ok := abstractCarrier.(io.Writer)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
// Handle the tracer context
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle the baggage items
|
||||
if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range sc.baggage {
|
||||
if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(carrier, k)
|
||||
if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(carrier, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
|
||||
carrier, ok := abstractCarrier.(io.Reader)
|
||||
if !ok {
|
||||
return emptyContext, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
var ctx SpanContext
|
||||
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
|
||||
// Handle the baggage items
|
||||
var numBaggage int32
|
||||
if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
|
||||
ctx.baggage = make(map[string]string, iNumBaggage)
|
||||
buf := p.buffers.Get().(*bytes.Buffer)
|
||||
defer p.buffers.Put(buf)
|
||||
|
||||
var keyLen, valLen int32
|
||||
for i := 0; i < iNumBaggage; i++ {
|
||||
if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
buf.Reset()
|
||||
buf.Grow(int(keyLen))
|
||||
if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
key := buf.String()
|
||||
|
||||
if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
buf.Reset()
|
||||
buf.Grow(int(valLen))
|
||||
if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
ctx.baggage[key] = buf.String()
|
||||
}
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// Converts a comma separated key value pair list into a map
|
||||
// e.g. key1=value1, key2=value2, key3 = value3
|
||||
// is converted to map[string]string { "key1" : "value1",
|
||||
// "key2" : "value2",
|
||||
// "key3" : "value3" }
|
||||
func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
|
||||
baggage := make(map[string]string)
|
||||
value, err := url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
log.Printf("Unable to unescape %s, %v", value, err)
|
||||
return baggage
|
||||
}
|
||||
for _, kvpair := range strings.Split(value, ",") {
|
||||
kv := strings.Split(strings.TrimSpace(kvpair), "=")
|
||||
if len(kv) == 2 {
|
||||
baggage[kv[0]] = kv[1]
|
||||
} else {
|
||||
log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
|
||||
}
|
||||
}
|
||||
return baggage
|
||||
}
|
||||
|
||||
// Converts a baggage item key into an http header format,
|
||||
// by prepending TraceBaggageHeaderPrefix and encoding the key string
|
||||
func (p *textMapPropagator) addBaggageKeyPrefix(key string) string {
|
||||
// TODO encodeBaggageKeyAsHeader add caching and escaping
|
||||
return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
|
||||
}
|
||||
|
||||
func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string {
|
||||
// TODO decodeBaggageHeaderKey add caching and escaping
|
||||
return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
|
||||
}
|
|
@@ -1,23 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import "github.com/opentracing/opentracing-go"

// Reference represents a causal reference to other Spans (via their SpanContext).
type Reference struct {
    Type    opentracing.SpanReferenceType
    Context SpanContext
}
@ -1,289 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
"github.com/uber/jaeger-client-go/log"
|
||||
)
|
||||
|
||||
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
|
||||
type Reporter interface {
|
||||
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
|
||||
Report(span *Span)
|
||||
|
||||
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
|
||||
Close()
|
||||
}
|
||||
|
||||
// ------------------------------
|
||||
|
||||
type nullReporter struct{}
|
||||
|
||||
// NewNullReporter creates a no-op reporter that ignores all reported spans.
|
||||
func NewNullReporter() Reporter {
|
||||
return &nullReporter{}
|
||||
}
|
||||
|
||||
// Report implements Report() method of Reporter by doing nothing.
|
||||
func (r *nullReporter) Report(span *Span) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by doing nothing.
|
||||
func (r *nullReporter) Close() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// ------------------------------
|
||||
|
||||
type loggingReporter struct {
|
||||
logger Logger
|
||||
}
|
||||
|
||||
// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
|
||||
func NewLoggingReporter(logger Logger) Reporter {
|
||||
return &loggingReporter{logger}
|
||||
}
|
||||
|
||||
// Report implements Report() method of Reporter by logging the span to the logger.
|
||||
func (r *loggingReporter) Report(span *Span) {
|
||||
r.logger.Infof("Reporting span %+v", span)
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by doing nothing.
|
||||
func (r *loggingReporter) Close() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// ------------------------------
|
||||
|
||||
// InMemoryReporter is used for testing, and simply collects spans in memory.
|
||||
type InMemoryReporter struct {
|
||||
spans []opentracing.Span
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// NewInMemoryReporter creates a reporter that stores spans in memory.
|
||||
// NOTE: the Tracer should be created with options.PoolSpans = false.
|
||||
func NewInMemoryReporter() *InMemoryReporter {
|
||||
return &InMemoryReporter{
|
||||
spans: make([]opentracing.Span, 0, 10),
|
||||
}
|
||||
}
|
||||
|
||||
// Report implements Report() method of Reporter by storing the span in the buffer.
|
||||
func (r *InMemoryReporter) Report(span *Span) {
|
||||
r.lock.Lock()
|
||||
r.spans = append(r.spans, span)
|
||||
r.lock.Unlock()
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by doing nothing.
|
||||
func (r *InMemoryReporter) Close() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// SpansSubmitted returns the number of spans accumulated in the buffer.
|
||||
func (r *InMemoryReporter) SpansSubmitted() int {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
return len(r.spans)
|
||||
}
|
||||
|
||||
// GetSpans returns accumulated spans as a copy of the buffer.
|
||||
func (r *InMemoryReporter) GetSpans() []opentracing.Span {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
copied := make([]opentracing.Span, len(r.spans))
|
||||
copy(copied, r.spans)
|
||||
return copied
|
||||
}
|
||||
|
||||
// Reset clears all accumulated spans.
|
||||
func (r *InMemoryReporter) Reset() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.spans = nil
|
||||
}
|
||||
|
||||
// ------------------------------
|
||||
|
||||
type compositeReporter struct {
|
||||
reporters []Reporter
|
||||
}
|
||||
|
||||
// NewCompositeReporter creates a reporter that ignores all reported spans.
|
||||
func NewCompositeReporter(reporters ...Reporter) Reporter {
|
||||
return &compositeReporter{reporters: reporters}
|
||||
}
|
||||
|
||||
// Report implements Report() method of Reporter by delegating to each underlying reporter.
|
||||
func (r *compositeReporter) Report(span *Span) {
|
||||
for _, reporter := range r.reporters {
|
||||
reporter.Report(span)
|
||||
}
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by closing each underlying reporter.
|
||||
func (r *compositeReporter) Close() {
|
||||
for _, reporter := range r.reporters {
|
||||
reporter.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// ------------- REMOTE REPORTER -----------------
|
||||
|
||||
type reporterQueueItemType int
|
||||
|
||||
const (
|
||||
defaultQueueSize = 100
|
||||
defaultBufferFlushInterval = 1 * time.Second
|
||||
|
||||
reporterQueueItemSpan reporterQueueItemType = iota
|
||||
reporterQueueItemClose
|
||||
)
|
||||
|
||||
type reporterQueueItem struct {
|
||||
itemType reporterQueueItemType
|
||||
span *Span
|
||||
close *sync.WaitGroup
|
||||
}
|
||||
|
||||
type remoteReporter struct {
|
||||
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
|
||||
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
|
||||
queueLength int64
|
||||
closed int64 // 0 - not closed, 1 - closed
|
||||
|
||||
reporterOptions
|
||||
|
||||
sender Transport
|
||||
queue chan reporterQueueItem
|
||||
}
|
||||
|
||||
// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
|
||||
// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
|
||||
// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
|
||||
// Calls to Close() block until all spans reported prior to the call to Close are flushed.
|
||||
func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
|
||||
options := reporterOptions{}
|
||||
for _, option := range opts {
|
||||
option(&options)
|
||||
}
|
||||
if options.bufferFlushInterval <= 0 {
|
||||
options.bufferFlushInterval = defaultBufferFlushInterval
|
||||
}
|
||||
if options.logger == nil {
|
||||
options.logger = log.NullLogger
|
||||
}
|
||||
if options.metrics == nil {
|
||||
options.metrics = NewNullMetrics()
|
||||
}
|
||||
if options.queueSize <= 0 {
|
||||
options.queueSize = defaultQueueSize
|
||||
}
|
||||
reporter := &remoteReporter{
|
||||
reporterOptions: options,
|
||||
sender: sender,
|
||||
queue: make(chan reporterQueueItem, options.queueSize),
|
||||
}
|
||||
go reporter.processQueue()
|
||||
return reporter
|
||||
}
|
||||
|
||||
// Report implements Report() method of Reporter.
|
||||
// It passes the span to a background go-routine for submission to Jaeger backend.
|
||||
// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
|
||||
// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
|
||||
// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
|
||||
// because some of them may still be successfully added to the queue.
|
||||
func (r *remoteReporter) Report(span *Span) {
|
||||
select {
|
||||
case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}:
|
||||
atomic.AddInt64(&r.queueLength, 1)
|
||||
default:
|
||||
r.metrics.ReporterDropped.Inc(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by waiting for the queue to be drained.
|
||||
func (r *remoteReporter) Close() {
|
||||
if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
|
||||
r.logger.Error("Repeated attempt to close the reporter is ignored")
|
||||
return
|
||||
}
|
||||
r.sendCloseEvent()
|
||||
r.sender.Close()
|
||||
}
|
||||
|
||||
func (r *remoteReporter) sendCloseEvent() {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
|
||||
|
||||
r.queue <- item // if the queue is full we will block until there is space
|
||||
atomic.AddInt64(&r.queueLength, 1)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
|
||||
// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger.
|
||||
// Buffer also gets flushed automatically every batchFlushInterval seconds, just in case the tracer stopped
|
||||
// reporting new spans.
|
||||
func (r *remoteReporter) processQueue() {
|
||||
// flush causes the Sender to flush its accumulated spans and clear the buffer
|
||||
flush := func() {
|
||||
if flushed, err := r.sender.Flush(); err != nil {
|
||||
r.metrics.ReporterFailure.Inc(int64(flushed))
|
||||
r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error()))
|
||||
} else if flushed > 0 {
|
||||
r.metrics.ReporterSuccess.Inc(int64(flushed))
|
||||
}
|
||||
}
|
||||
|
||||
timer := time.NewTicker(r.bufferFlushInterval)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
flush()
|
||||
case item := <-r.queue:
|
||||
atomic.AddInt64(&r.queueLength, -1)
|
||||
switch item.itemType {
|
||||
case reporterQueueItemSpan:
|
||||
span := item.span
|
||||
if flushed, err := r.sender.Append(span); err != nil {
|
||||
r.metrics.ReporterFailure.Inc(int64(flushed))
|
||||
r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error()))
|
||||
} else if flushed > 0 {
|
||||
r.metrics.ReporterSuccess.Inc(int64(flushed))
|
||||
// to reduce the number of gauge stats, we only emit queue length on flush
|
||||
r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
|
||||
}
|
||||
case reporterQueueItemClose:
|
||||
timer.Stop()
|
||||
flush()
|
||||
item.close.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,69 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "time"
)

// ReporterOption is a function that sets some option on the reporter.
type ReporterOption func(c *reporterOptions)

// ReporterOptions is a factory for all available ReporterOption's
var ReporterOptions reporterOptions

// reporterOptions control behavior of the reporter.
type reporterOptions struct {
    // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
    queueSize int
    // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
    bufferFlushInterval time.Duration
    // logger is used to log errors of span submissions
    logger Logger
    // metrics is used to record runtime stats
    metrics *Metrics
}

// QueueSize creates a ReporterOption that sets the size of the internal queue where
// spans are stored before they are processed.
func (reporterOptions) QueueSize(queueSize int) ReporterOption {
    return func(r *reporterOptions) {
        r.queueSize = queueSize
    }
}

// Metrics creates a ReporterOption that initializes Metrics in the reporter,
// which is used to record runtime statistics.
func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
    return func(r *reporterOptions) {
        r.metrics = metrics
    }
}

// BufferFlushInterval creates a ReporterOption that sets how often the queue
// is force-flushed.
func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
    return func(r *reporterOptions) {
        r.bufferFlushInterval = bufferFlushInterval
    }
}

// Logger creates a ReporterOption that initializes the logger used to log
// errors of span submissions.
func (reporterOptions) Logger(logger Logger) ReporterOption {
    return func(r *reporterOptions) {
        r.logger = logger
    }
}
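Each option above is a small closure over reporterOptions, so they compose at construction time. A hedged sketch of wiring them into NewRemoteReporter (defined in the reporter.go hunk earlier in this diff); the agent address, queue size, and flush interval are illustrative values, and NewUDPTransport is assumed to come from the same removed client library.

package main

import (
    "time"

    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-client-go/log"
)

func main() {
    // UDP sender toward a local jaeger-agent; 0 means the default max packet size.
    sender, err := jaeger.NewUDPTransport("127.0.0.1:6831", 0)
    if err != nil {
        panic(err)
    }

    reporter := jaeger.NewRemoteReporter(
        sender,
        jaeger.ReporterOptions.QueueSize(200),
        jaeger.ReporterOptions.BufferFlushInterval(500*time.Millisecond),
        jaeger.ReporterOptions.Logger(log.StdLogger),
    )
    defer reporter.Close() // flushes whatever is still buffered
}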
@@ -1,5 +0,0 @@
An Observer that can be used to emit RPC metrics
================================================

It can be attached to the tracer during tracer construction.
See `ExampleObserver` function in [observer_test.go](./observer_test.go).
@@ -1,16 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
package rpcmetrics
@@ -1,63 +0,0 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rpcmetrics

import "sync"

// normalizedEndpoints is a cache for endpointName -> safeName mappings.
type normalizedEndpoints struct {
    names       map[string]string
    maxSize     int
    defaultName string
    normalizer  NameNormalizer
    mux         sync.RWMutex
}

func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
    return &normalizedEndpoints{
        maxSize:    maxSize,
        normalizer: normalizer,
        names:      make(map[string]string, maxSize),
    }
}

// normalize looks up the name in the cache, if not found it uses normalizer
// to convert the name to a safe name. If called with more than maxSize unique
// names it returns "" for all other names beyond those already cached.
func (n *normalizedEndpoints) normalize(name string) string {
    n.mux.RLock()
    norm, ok := n.names[name]
    l := len(n.names)
    n.mux.RUnlock()
    if ok {
        return norm
    }
    if l >= n.maxSize {
        return ""
    }
    return n.normalizeWithLock(name)
}

func (n *normalizedEndpoints) normalizeWithLock(name string) string {
    norm := n.normalizer.Normalize(name)
    n.mux.Lock()
    defer n.mux.Unlock()
    // cache may have grown while we were not holding the lock
    if len(n.names) >= n.maxSize {
        return ""
    }
    n.names[name] = norm
    return norm
}
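normalize above uses the classic read-lock-then-write-lock pattern: the fast path holds only a shared RLock, and the slow path re-checks the size cap after taking the write lock, because another goroutine may have filled the cache in between. The same pattern in a standalone bounded cache; the sizes and the uppercase "normalizer" are arbitrary stand-ins.

package main

import (
    "fmt"
    "strings"
    "sync"
)

// boundedCache memoizes a transformation for at most maxSize distinct keys.
type boundedCache struct {
    mux     sync.RWMutex
    entries map[string]string
    maxSize int
}

func (c *boundedCache) get(key string) string {
    c.mux.RLock()
    v, ok := c.entries[key]
    full := len(c.entries) >= c.maxSize
    c.mux.RUnlock()
    if ok {
        return v
    }
    if full {
        return "" // over budget: refuse to grow, same as normalize above
    }

    v = strings.ToUpper(key) // stand-in for the real normalizer
    c.mux.Lock()
    defer c.mux.Unlock()
    // Re-check under the write lock: the cache may have grown meanwhile.
    if len(c.entries) >= c.maxSize {
        return ""
    }
    c.entries[key] = v
    return v
}

func main() {
    c := &boundedCache{entries: map[string]string{}, maxSize: 2}
    fmt.Println(c.get("get /users"), c.get("post /users"), c.get("delete /users")) // third is ""
}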
@ -1,124 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpcmetrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/uber/jaeger-lib/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
otherEndpointsPlaceholder = "other"
|
||||
endpointNameMetricTag = "endpoint"
|
||||
)
|
||||
|
||||
// Metrics is a collection of metrics for an endpoint describing
|
||||
// throughput, success, errors, and performance.
|
||||
type Metrics struct {
|
||||
// RequestCountSuccess is a counter of the total number of successes.
|
||||
RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
|
||||
|
||||
// RequestCountFailures is a counter of the number of times any failure has been observed.
|
||||
RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
|
||||
|
||||
// RequestLatencySuccess is a latency histogram of successful requests.
|
||||
RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
|
||||
|
||||
// RequestLatencyFailures is a latency histogram of failed requests.
|
||||
RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
|
||||
|
||||
// HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
|
||||
HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
|
||||
|
||||
// HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
|
||||
HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
|
||||
|
||||
// HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
|
||||
HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
|
||||
|
||||
// HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
|
||||
HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
|
||||
}
|
||||
|
||||
func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
|
||||
if statusCode >= 200 && statusCode < 300 {
|
||||
m.HTTPStatusCode2xx.Inc(1)
|
||||
} else if statusCode >= 300 && statusCode < 400 {
|
||||
m.HTTPStatusCode3xx.Inc(1)
|
||||
} else if statusCode >= 400 && statusCode < 500 {
|
||||
m.HTTPStatusCode4xx.Inc(1)
|
||||
} else if statusCode >= 500 && statusCode < 600 {
|
||||
m.HTTPStatusCode5xx.Inc(1)
|
||||
}
|
||||
}
|
||||
|
||||
// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
|
||||
// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
|
||||
// to a generic endpoint name "other".
|
||||
type MetricsByEndpoint struct {
|
||||
metricsFactory metrics.Factory
|
||||
endpoints *normalizedEndpoints
|
||||
metricsByEndpoint map[string]*Metrics
|
||||
mux sync.RWMutex
|
||||
}
|
||||
|
||||
func newMetricsByEndpoint(
|
||||
metricsFactory metrics.Factory,
|
||||
normalizer NameNormalizer,
|
||||
maxNumberOfEndpoints int,
|
||||
) *MetricsByEndpoint {
|
||||
return &MetricsByEndpoint{
|
||||
metricsFactory: metricsFactory,
|
||||
endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
|
||||
metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
|
||||
safeName := m.endpoints.normalize(endpoint)
|
||||
if safeName == "" {
|
||||
safeName = otherEndpointsPlaceholder
|
||||
}
|
||||
m.mux.RLock()
|
||||
met := m.metricsByEndpoint[safeName]
|
||||
m.mux.RUnlock()
|
||||
if met != nil {
|
||||
return met
|
||||
}
|
||||
|
||||
return m.getWithWriteLock(safeName)
|
||||
}
|
||||
|
||||
// split out to make it easier to test
|
||||
func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
|
||||
m.mux.Lock()
|
||||
defer m.mux.Unlock()
|
||||
|
||||
// it is possible that the name has been already registered after we released
|
||||
// the read lock and before we grabbed the write lock, so check for that.
|
||||
if met, ok := m.metricsByEndpoint[safeName]; ok {
|
||||
return met
|
||||
}
|
||||
|
||||
// it would be nice to create the struct before locking, since Init() is somewhat
|
||||
// expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
|
||||
met := &Metrics{}
|
||||
tags := map[string]string{endpointNameMetricTag: safeName}
|
||||
metrics.Init(met, m.metricsFactory, tags)
|
||||
|
||||
m.metricsByEndpoint[safeName] = met
|
||||
return met
|
||||
}
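// Editor's sketch (not part of the vendored file): how the registry above is
// used by the span observer. metrics.NullFactory is assumed to be the no-op
// factory from jaeger-lib; endpoint name and counts are illustrative.
func exampleMetricsByEndpoint() {
	mbe := newMetricsByEndpoint(metrics.NullFactory, DefaultNameNormalizer, 100)
	met := mbe.get("GET /user") // lazily creates Metrics tagged endpoint=GET-/user
	met.RequestCountSuccess.Inc(1)
	met.recordHTTPStatusCode(200) // bumps the http_requests{status_code=2xx} counter
}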
|
|
@@ -1,101 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpcmetrics
|
||||
|
||||
// NameNormalizer is used to convert the endpoint names to strings
|
||||
// that can be safely used as tags in the metrics.
|
||||
type NameNormalizer interface {
|
||||
Normalize(name string) string
|
||||
}
|
||||
|
||||
// DefaultNameNormalizer converts endpoint names so that they contain only characters
|
||||
// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
|
||||
var DefaultNameNormalizer = &SimpleNameNormalizer{
|
||||
SafeSets: []SafeCharacterSet{
|
||||
&Range{From: 'a', To: 'z'},
|
||||
&Range{From: 'A', To: 'Z'},
|
||||
&Range{From: '0', To: '9'},
|
||||
&Char{'-'},
|
||||
&Char{'_'},
|
||||
&Char{'/'},
|
||||
&Char{'.'},
|
||||
},
|
||||
Replacement: '-',
|
||||
}
|
||||
|
||||
// SimpleNameNormalizer uses a set of safe character sets.
|
||||
type SimpleNameNormalizer struct {
|
||||
SafeSets []SafeCharacterSet
|
||||
Replacement byte
|
||||
}
|
||||
|
||||
// SafeCharacterSet determines if the given character is "safe"
|
||||
type SafeCharacterSet interface {
|
||||
IsSafe(c byte) bool
|
||||
}
|
||||
|
||||
// Range implements SafeCharacterSet
|
||||
type Range struct {
|
||||
From, To byte
|
||||
}
|
||||
|
||||
// IsSafe implements SafeCharacterSet
|
||||
func (r *Range) IsSafe(c byte) bool {
|
||||
return c >= r.From && c <= r.To
|
||||
}
|
||||
|
||||
// Char implements SafeCharacterSet
|
||||
type Char struct {
|
||||
Val byte
|
||||
}
|
||||
|
||||
// IsSafe implements SafeCharacterSet
|
||||
func (ch *Char) IsSafe(c byte) bool {
|
||||
return c == ch.Val
|
||||
}
|
||||
|
||||
// Normalize checks each character in the string against SafeSets,
|
||||
// and if it's not safe substitutes it with Replacement.
|
||||
func (n *SimpleNameNormalizer) Normalize(name string) string {
|
||||
var retMe []byte
|
||||
nameBytes := []byte(name)
|
||||
for i, b := range nameBytes {
|
||||
if n.safeByte(b) {
|
||||
if retMe != nil {
|
||||
retMe[i] = b
|
||||
}
|
||||
} else {
|
||||
if retMe == nil {
|
||||
retMe = make([]byte, len(nameBytes))
|
||||
copy(retMe[0:i], nameBytes[0:i])
|
||||
}
|
||||
retMe[i] = n.Replacement
|
||||
}
|
||||
}
|
||||
if retMe == nil {
|
||||
return name
|
||||
}
|
||||
return string(retMe)
|
||||
}
|
||||
|
||||
// safeByte checks if b against all safe charsets.
|
||||
func (n *SimpleNameNormalizer) safeByte(b byte) bool {
|
||||
for i := range n.SafeSets {
|
||||
if n.SafeSets[i].IsSafe(b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
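// Editor's sketch (not part of the vendored file): what the default normalizer
// does to a typical HTTP endpoint name.
func exampleNormalize() {
	name := DefaultNameNormalizer.Normalize("GET /user/{id}")
	// ' ', '{' and '}' are outside the safe charset, so each becomes '-':
	// name == "GET-/user/-id-"
	_ = name
}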
|
|
@@ -1,171 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpcmetrics
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
"github.com/uber/jaeger-lib/metrics"
|
||||
|
||||
jaeger "github.com/uber/jaeger-client-go"
|
||||
)
|
||||
|
||||
const defaultMaxNumberOfEndpoints = 200
|
||||
|
||||
// Observer is an observer that can emit RPC metrics.
|
||||
type Observer struct {
|
||||
metricsByEndpoint *MetricsByEndpoint
|
||||
}
|
||||
|
||||
// NewObserver creates a new observer that can emit RPC metrics.
|
||||
func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
|
||||
return &Observer{
|
||||
metricsByEndpoint: newMetricsByEndpoint(
|
||||
metricsFactory,
|
||||
normalizer,
|
||||
defaultMaxNumberOfEndpoints,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// OnStartSpan creates a new Observer for the span.
|
||||
func (o *Observer) OnStartSpan(
|
||||
operationName string,
|
||||
options opentracing.StartSpanOptions,
|
||||
) jaeger.SpanObserver {
|
||||
return NewSpanObserver(o.metricsByEndpoint, operationName, options)
|
||||
}
|
||||
|
||||
// SpanKind identifies the span as inbound, outbound, or internal
|
||||
type SpanKind int
|
||||
|
||||
const (
|
||||
// Local span kind
|
||||
Local SpanKind = iota
|
||||
// Inbound span kind
|
||||
Inbound
|
||||
// Outbound span kind
|
||||
Outbound
|
||||
)
|
||||
|
||||
// SpanObserver collects RPC metrics
|
||||
type SpanObserver struct {
|
||||
metricsByEndpoint *MetricsByEndpoint
|
||||
operationName string
|
||||
startTime time.Time
|
||||
mux sync.Mutex
|
||||
kind SpanKind
|
||||
httpStatusCode uint16
|
||||
err bool
|
||||
}
|
||||
|
||||
// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
|
||||
func NewSpanObserver(
|
||||
metricsByEndpoint *MetricsByEndpoint,
|
||||
operationName string,
|
||||
options opentracing.StartSpanOptions,
|
||||
) *SpanObserver {
|
||||
so := &SpanObserver{
|
||||
metricsByEndpoint: metricsByEndpoint,
|
||||
operationName: operationName,
|
||||
startTime: options.StartTime,
|
||||
}
|
||||
for k, v := range options.Tags {
|
||||
so.handleTagInLock(k, v)
|
||||
}
|
||||
return so
|
||||
}
|
||||
|
||||
// handleTagInLock watches for special tags:
|
||||
// - SpanKind
|
||||
// - HttpStatusCode
|
||||
// - Error
|
||||
func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
|
||||
if key == string(ext.SpanKind) {
|
||||
if v, ok := value.(ext.SpanKindEnum); ok {
|
||||
value = string(v)
|
||||
}
|
||||
if v, ok := value.(string); ok {
|
||||
if v == string(ext.SpanKindRPCClientEnum) {
|
||||
so.kind = Outbound
|
||||
} else if v == string(ext.SpanKindRPCServerEnum) {
|
||||
so.kind = Inbound
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
if key == string(ext.HTTPStatusCode) {
|
||||
if v, ok := value.(uint16); ok {
|
||||
so.httpStatusCode = v
|
||||
} else if v, ok := value.(int); ok {
|
||||
so.httpStatusCode = uint16(v)
|
||||
} else if v, ok := value.(string); ok {
|
||||
if vv, err := strconv.Atoi(v); err == nil {
|
||||
so.httpStatusCode = uint16(vv)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
if key == string(ext.Error) {
|
||||
if v, ok := value.(bool); ok {
|
||||
so.err = v
|
||||
} else if v, ok := value.(string); ok {
|
||||
if vv, err := strconv.ParseBool(v); err == nil {
|
||||
so.err = vv
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// OnFinish emits the RPC metrics. It only has an effect when operation name
|
||||
// is not blank, and the span kind is an RPC server.
|
||||
func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
|
||||
so.mux.Lock()
|
||||
defer so.mux.Unlock()
|
||||
|
||||
if so.operationName == "" || so.kind != Inbound {
|
||||
return
|
||||
}
|
||||
|
||||
mets := so.metricsByEndpoint.get(so.operationName)
|
||||
latency := options.FinishTime.Sub(so.startTime)
|
||||
if so.err {
|
||||
mets.RequestCountFailures.Inc(1)
|
||||
mets.RequestLatencyFailures.Record(latency)
|
||||
} else {
|
||||
mets.RequestCountSuccess.Inc(1)
|
||||
mets.RequestLatencySuccess.Record(latency)
|
||||
}
|
||||
mets.recordHTTPStatusCode(so.httpStatusCode)
|
||||
}
|
||||
|
||||
// OnSetOperationName records new operation name.
|
||||
func (so *SpanObserver) OnSetOperationName(operationName string) {
|
||||
so.mux.Lock()
|
||||
so.operationName = operationName
|
||||
so.mux.Unlock()
|
||||
}
|
||||
|
||||
// OnSetTag implements SpanObserver
|
||||
func (so *SpanObserver) OnSetTag(key string, value interface{}) {
|
||||
so.mux.Lock()
|
||||
so.handleTagInLock(key, value)
|
||||
so.mux.Unlock()
|
||||
}
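// Editor's sketch (not part of the vendored file): the observer only emits
// metrics for inbound (RPC server) spans with a non-empty operation name;
// the endpoint name and timings below are illustrative.
func exampleSpanObserver(mbe *MetricsByEndpoint) {
	start := time.Now()
	so := NewSpanObserver(mbe, "GET /user", opentracing.StartSpanOptions{StartTime: start})
	so.OnSetTag(string(ext.SpanKind), ext.SpanKindRPCServerEnum) // marks the span as Inbound
	so.OnSetTag(string(ext.HTTPStatusCode), 200)
	so.OnFinish(opentracing.FinishOptions{FinishTime: start.Add(30 * time.Millisecond)})
	// requests{endpoint=GET-/user,error=false} and http_requests{status_code=2xx}
	// are incremented, and the 30ms latency is recorded.
}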
|
|
@@ -1,556 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go/log"
|
||||
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
|
||||
"github.com/uber/jaeger-client-go/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSamplingServerURL = "http://localhost:5778/sampling"
|
||||
defaultSamplingRefreshInterval = time.Minute
|
||||
defaultMaxOperations = 2000
|
||||
)
|
||||
|
||||
// Sampler decides whether a new trace should be sampled or not.
|
||||
type Sampler interface {
|
||||
// IsSampled decides whether a trace with given `id` and `operation`
|
||||
// should be sampled. This function will also return the tags that
|
||||
// can be used to identify the type of sampling that was applied to
|
||||
// the root span. Most simple samplers would return two tags,
|
||||
// sampler.type and sampler.param, similar to those used in the Configuration
|
||||
IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
|
||||
|
||||
// Close does a clean shutdown of the sampler, stopping any background
|
||||
// go-routines it may have started.
|
||||
Close()
|
||||
|
||||
// Equal checks if the `other` sampler is functionally equivalent
|
||||
// to this sampler.
|
||||
// TODO remove this function. This function is used to determine if 2 samplers are equivalent
|
||||
// which does not work well with the adaptive sampler, which has to create all the composite samplers
|
||||
// for the comparison to occur. This is expensive to do if only one sampler has changed.
|
||||
Equal(other Sampler) bool
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// ConstSampler is a sampler that always makes the same decision.
|
||||
type ConstSampler struct {
|
||||
Decision bool
|
||||
tags []Tag
|
||||
}
|
||||
|
||||
// NewConstSampler creates a ConstSampler.
|
||||
func NewConstSampler(sample bool) Sampler {
|
||||
tags := []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeConst},
|
||||
{key: SamplerParamTagKey, value: sample},
|
||||
}
|
||||
return &ConstSampler{Decision: sample, tags: tags}
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return s.Decision, s.tags
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *ConstSampler) Close() {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *ConstSampler) Equal(other Sampler) bool {
|
||||
if o, ok := other.(*ConstSampler); ok {
|
||||
return s.Decision == o.Decision
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
|
||||
// of traces.
|
||||
type ProbabilisticSampler struct {
|
||||
samplingRate float64
|
||||
samplingBoundary uint64
|
||||
tags []Tag
|
||||
}
|
||||
|
||||
const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
|
||||
|
||||
// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
|
||||
// samplingRate, in the range between 0.0 and 1.0.
|
||||
//
|
||||
// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision
|
||||
// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
|
||||
// TODO remove the error from this function for next major release
|
||||
func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
|
||||
if samplingRate < 0.0 || samplingRate > 1.0 {
|
||||
return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
|
||||
}
|
||||
return newProbabilisticSampler(samplingRate), nil
|
||||
}
|
||||
|
||||
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
|
||||
samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
|
||||
tags := []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
|
||||
{key: SamplerParamTagKey, value: samplingRate},
|
||||
}
|
||||
return &ProbabilisticSampler{
|
||||
samplingRate: samplingRate,
|
||||
samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
|
||||
tags: tags,
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingRate returns the sampling probability this sampler was constructed with.
|
||||
func (s *ProbabilisticSampler) SamplingRate() float64 {
|
||||
return s.samplingRate
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return s.samplingBoundary >= id.Low, s.tags
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *ProbabilisticSampler) Close() {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *ProbabilisticSampler) Equal(other Sampler) bool {
|
||||
if o, ok := other.(*ProbabilisticSampler); ok {
|
||||
return s.samplingBoundary == o.samplingBoundary
|
||||
}
|
||||
return false
|
||||
}
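// Editor's sketch (not part of the vendored file): the probabilistic decision is a
// single comparison of the 63-bit random trace ID against a precomputed boundary.
func exampleProbabilisticBoundary() {
	s, _ := NewProbabilisticSampler(0.001)
	// samplingBoundary = 0.001 * (2^63 - 1) ≈ 9.2e15; any trace whose id.Low is
	// at or below that boundary is sampled.
	sampled, tags := s.IsSampled(TraceID{Low: 42}, "GET /user")
	_ = sampled // true here, since 42 <= boundary
	_ = tags    // carries the sampler.type / sampler.param tags
}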
|
||||
|
||||
// -----------------------
|
||||
|
||||
type rateLimitingSampler struct {
|
||||
maxTracesPerSecond float64
|
||||
rateLimiter utils.RateLimiter
|
||||
tags []Tag
|
||||
}
|
||||
|
||||
// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
|
||||
// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
|
||||
// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
|
||||
// sequential requests can be sampled each second.
|
||||
func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
|
||||
tags := []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
|
||||
{key: SamplerParamTagKey, value: maxTracesPerSecond},
|
||||
}
|
||||
return &rateLimitingSampler{
|
||||
maxTracesPerSecond: maxTracesPerSecond,
|
||||
rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
|
||||
tags: tags,
|
||||
}
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return s.rateLimiter.CheckCredit(1.0), s.tags
|
||||
}
|
||||
|
||||
func (s *rateLimitingSampler) Close() {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (s *rateLimitingSampler) Equal(other Sampler) bool {
|
||||
if o, ok := other.(*rateLimitingSampler); ok {
|
||||
return s.maxTracesPerSecond == o.maxTracesPerSecond
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
|
||||
// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
|
||||
// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
|
||||
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
|
||||
//
|
||||
// The probabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
|
||||
// samplers return true, the tags for probabilisticSampler will be used.
|
||||
type GuaranteedThroughputProbabilisticSampler struct {
|
||||
probabilisticSampler *ProbabilisticSampler
|
||||
lowerBoundSampler Sampler
|
||||
tags []Tag
|
||||
samplingRate float64
|
||||
lowerBound float64
|
||||
}
|
||||
|
||||
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
|
||||
// probabilisticSampler and rateLimitingSampler.
|
||||
func NewGuaranteedThroughputProbabilisticSampler(
|
||||
lowerBound, samplingRate float64,
|
||||
) (*GuaranteedThroughputProbabilisticSampler, error) {
|
||||
return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
|
||||
}
|
||||
|
||||
func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
|
||||
s := &GuaranteedThroughputProbabilisticSampler{
|
||||
lowerBoundSampler: NewRateLimitingSampler(lowerBound),
|
||||
lowerBound: lowerBound,
|
||||
}
|
||||
s.setProbabilisticSampler(samplingRate)
|
||||
return s
|
||||
}
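// Editor's sketch (not part of the vendored file): 1% probabilistic sampling combined
// with a guaranteed lower bound of at least one sampled trace every 10 minutes
// (1/600 per second); values are illustrative.
func exampleGuaranteedThroughput() {
	s := newGuaranteedThroughputProbabilisticSampler(1.0/600.0, 0.01)
	sampled, tags := s.IsSampled(TraceID{Low: 1}, "GET /user")
	// Either the probabilistic check passes (probabilistic tags are returned), or the
	// rate limiter grants one of its 1-per-600s credits (lower-bound tags are returned).
	_, _ = sampled, tags
}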
|
||||
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
|
||||
if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
|
||||
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
|
||||
s.samplingRate = s.probabilisticSampler.SamplingRate()
|
||||
s.tags = []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
|
||||
{key: SamplerParamTagKey, value: s.samplingRate},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
|
||||
s.lowerBoundSampler.IsSampled(id, operation)
|
||||
return true, tags
|
||||
}
|
||||
sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
|
||||
return sampled, s.tags
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) Close() {
|
||||
s.probabilisticSampler.Close()
|
||||
s.lowerBoundSampler.Close()
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
|
||||
// more information.
|
||||
return false
|
||||
}
|
||||
|
||||
// this function should only be called while holding a Write lock
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
|
||||
s.setProbabilisticSampler(samplingRate)
|
||||
if s.lowerBound != lowerBound {
|
||||
s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
|
||||
s.lowerBound = lowerBound
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
type adaptiveSampler struct {
|
||||
sync.RWMutex
|
||||
|
||||
samplers map[string]*GuaranteedThroughputProbabilisticSampler
|
||||
defaultSampler *ProbabilisticSampler
|
||||
lowerBound float64
|
||||
maxOperations int
|
||||
}
|
||||
|
||||
// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
|
||||
// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
|
||||
// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
|
||||
func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
|
||||
return newAdaptiveSampler(strategies, maxOperations), nil
|
||||
}
|
||||
|
||||
func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
|
||||
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
|
||||
for _, strategy := range strategies.PerOperationStrategies {
|
||||
sampler := newGuaranteedThroughputProbabilisticSampler(
|
||||
strategies.DefaultLowerBoundTracesPerSecond,
|
||||
strategy.ProbabilisticSampling.SamplingRate,
|
||||
)
|
||||
samplers[strategy.Operation] = sampler
|
||||
}
|
||||
return &adaptiveSampler{
|
||||
samplers: samplers,
|
||||
defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
|
||||
lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
|
||||
maxOperations: maxOperations,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
s.RLock()
|
||||
sampler, ok := s.samplers[operation]
|
||||
if ok {
|
||||
defer s.RUnlock()
|
||||
return sampler.IsSampled(id, operation)
|
||||
}
|
||||
s.RUnlock()
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
// Check if sampler has already been created
|
||||
sampler, ok = s.samplers[operation]
|
||||
if ok {
|
||||
return sampler.IsSampled(id, operation)
|
||||
}
|
||||
// Store only up to maxOperations of unique ops.
|
||||
if len(s.samplers) >= s.maxOperations {
|
||||
return s.defaultSampler.IsSampled(id, operation)
|
||||
}
|
||||
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
|
||||
s.samplers[operation] = newSampler
|
||||
return newSampler.IsSampled(id, operation)
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) Close() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
for _, sampler := range s.samplers {
|
||||
sampler.Close()
|
||||
}
|
||||
s.defaultSampler.Close()
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
|
||||
// samplers which all need to be initialized before this function can be called for a comparison.
|
||||
// Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
|
||||
// changing. Hence this function always returns false so that the update function can be called.
|
||||
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
for _, strategy := range strategies.PerOperationStrategies {
|
||||
operation := strategy.Operation
|
||||
samplingRate := strategy.ProbabilisticSampling.SamplingRate
|
||||
lowerBound := strategies.DefaultLowerBoundTracesPerSecond
|
||||
if sampler, ok := s.samplers[operation]; ok {
|
||||
sampler.update(lowerBound, samplingRate)
|
||||
} else {
|
||||
sampler := newGuaranteedThroughputProbabilisticSampler(
|
||||
lowerBound,
|
||||
samplingRate,
|
||||
)
|
||||
s.samplers[operation] = sampler
|
||||
}
|
||||
}
|
||||
s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
|
||||
if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
|
||||
s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// RemotelyControlledSampler is a delegating sampler that polls a remote server
|
||||
// for the appropriate sampling strategy, constructs a corresponding sampler and
|
||||
// delegates to it for sampling decisions.
|
||||
type RemotelyControlledSampler struct {
|
||||
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
|
||||
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
|
||||
closed int64 // 0 - not closed, 1 - closed
|
||||
|
||||
sync.RWMutex
|
||||
samplerOptions
|
||||
|
||||
serviceName string
|
||||
manager sampling.SamplingManager
|
||||
doneChan chan *sync.WaitGroup
|
||||
}
|
||||
|
||||
type httpSamplingManager struct {
|
||||
serverURL string
|
||||
}
|
||||
|
||||
func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
|
||||
var out sampling.SamplingStrategyResponse
|
||||
v := url.Values{}
|
||||
v.Set("service", serviceName)
|
||||
if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
// NewRemotelyControlledSampler creates a sampler that periodically pulls
|
||||
// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
|
||||
func NewRemotelyControlledSampler(
|
||||
serviceName string,
|
||||
opts ...SamplerOption,
|
||||
) *RemotelyControlledSampler {
|
||||
options := applySamplerOptions(opts...)
|
||||
sampler := &RemotelyControlledSampler{
|
||||
samplerOptions: options,
|
||||
serviceName: serviceName,
|
||||
manager: &httpSamplingManager{serverURL: options.samplingServerURL},
|
||||
doneChan: make(chan *sync.WaitGroup),
|
||||
}
|
||||
go sampler.pollController()
|
||||
return sampler
|
||||
}
|
||||
|
||||
func applySamplerOptions(opts ...SamplerOption) samplerOptions {
|
||||
options := samplerOptions{}
|
||||
for _, option := range opts {
|
||||
option(&options)
|
||||
}
|
||||
if options.sampler == nil {
|
||||
options.sampler = newProbabilisticSampler(0.001)
|
||||
}
|
||||
if options.logger == nil {
|
||||
options.logger = log.NullLogger
|
||||
}
|
||||
if options.maxOperations <= 0 {
|
||||
options.maxOperations = defaultMaxOperations
|
||||
}
|
||||
if options.samplingServerURL == "" {
|
||||
options.samplingServerURL = defaultSamplingServerURL
|
||||
}
|
||||
if options.metrics == nil {
|
||||
options.metrics = NewNullMetrics()
|
||||
}
|
||||
if options.samplingRefreshInterval <= 0 {
|
||||
options.samplingRefreshInterval = defaultSamplingRefreshInterval
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.sampler.IsSampled(id, operation)
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Close() {
|
||||
if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
|
||||
s.logger.Error("Repeated attempt to close the sampler is ignored")
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
s.doneChan <- &wg
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
|
||||
// more information.
|
||||
if o, ok := other.(*RemotelyControlledSampler); ok {
|
||||
s.RLock()
|
||||
o.RLock()
|
||||
defer s.RUnlock()
|
||||
defer o.RUnlock()
|
||||
return s.sampler.Equal(o.sampler)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollController() {
|
||||
ticker := time.NewTicker(s.samplingRefreshInterval)
|
||||
defer ticker.Stop()
|
||||
s.pollControllerWithTicker(ticker)
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
s.updateSampler()
|
||||
case wg := <-s.doneChan:
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) getSampler() Sampler {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.sampler
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.sampler = sampler
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) updateSampler() {
|
||||
res, err := s.manager.GetSamplingStrategy(s.serviceName)
|
||||
if err != nil {
|
||||
s.metrics.SamplerQueryFailure.Inc(1)
|
||||
return
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.metrics.SamplerRetrieved.Inc(1)
|
||||
if strategies := res.GetOperationSampling(); strategies != nil {
|
||||
s.updateAdaptiveSampler(strategies)
|
||||
} else {
|
||||
err = s.updateRateLimitingOrProbabilisticSampler(res)
|
||||
}
|
||||
if err != nil {
|
||||
s.metrics.SamplerUpdateFailure.Inc(1)
|
||||
s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
|
||||
return
|
||||
}
|
||||
s.metrics.SamplerUpdated.Inc(1)
|
||||
}
|
||||
|
||||
// NB: this function should only be called while holding a Write lock
|
||||
func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
|
||||
if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
|
||||
adaptiveSampler.update(strategies)
|
||||
} else {
|
||||
s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
|
||||
}
|
||||
}
|
||||
|
||||
// NB: this function should only be called while holding a Write lock
|
||||
func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
|
||||
var newSampler Sampler
|
||||
if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
|
||||
newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
|
||||
} else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
|
||||
newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
|
||||
} else {
|
||||
return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
|
||||
}
|
||||
if !s.sampler.Equal(newSampler) {
|
||||
s.sampler = newSampler
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -1,81 +0,0 @@
|
|||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SamplerOption is a function that sets some option on the sampler
|
||||
type SamplerOption func(options *samplerOptions)
|
||||
|
||||
// SamplerOptions is a factory for all available SamplerOption values
|
||||
var SamplerOptions samplerOptions
|
||||
|
||||
type samplerOptions struct {
|
||||
metrics *Metrics
|
||||
maxOperations int
|
||||
sampler Sampler
|
||||
logger Logger
|
||||
samplingServerURL string
|
||||
samplingRefreshInterval time.Duration
|
||||
}
|
||||
|
||||
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
|
||||
// which is used to emit statistics.
|
||||
func (samplerOptions) Metrics(m *Metrics) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.metrics = m
|
||||
}
|
||||
}
|
||||
|
||||
// MaxOperations creates a SamplerOption that sets the maximum number of
|
||||
// operations the sampler will keep track of.
|
||||
func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.maxOperations = maxOperations
|
||||
}
|
||||
}
|
||||
|
||||
// InitialSampler creates a SamplerOption that sets the initial sampler
|
||||
// to use before a remote sampler is created and used.
|
||||
func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.sampler = sampler
|
||||
}
|
||||
}
|
||||
|
||||
// Logger creates a SamplerOption that sets the logger used by the sampler.
|
||||
func (samplerOptions) Logger(logger Logger) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingServerURL creates a SamplerOption that sets the sampling server url
|
||||
// of the local agent that contains the sampling strategies.
|
||||
func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingServerURL = samplingServerURL
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingRefreshInterval creates a SamplerOption that sets how often the
|
||||
// sampler will poll local agent for the appropriate sampling strategy.
|
||||
func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingRefreshInterval = samplingRefreshInterval
|
||||
}
|
||||
}
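// Editor's sketch (not part of the vendored file): composing the options above into
// a remotely controlled sampler; the service name, agent URL and rates are illustrative.
func exampleRemoteSampler() Sampler {
	return NewRemotelyControlledSampler(
		"my-service",
		SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
		SamplerOptions.SamplingRefreshInterval(time.Minute),
		SamplerOptions.InitialSampler(newProbabilisticSampler(0.01)),
	)
}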
|
|
@@ -1,249 +0,0 @@
|
|||
// Copyright (c) 2017-2018 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
"github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
// Span implements opentracing.Span
|
||||
type Span struct {
|
||||
sync.RWMutex
|
||||
|
||||
tracer *Tracer
|
||||
|
||||
context SpanContext
|
||||
|
||||
// The name of the "operation" this span is an instance of.
|
||||
// Known as a "span name" in some implementations.
|
||||
operationName string
|
||||
|
||||
// firstInProcess, if true, indicates that this span is the root of the (sub)tree
|
||||
// of spans in the current process. In other words it's true for the root spans,
|
||||
// and the ingress spans when the process joins another trace.
|
||||
firstInProcess bool
|
||||
|
||||
// startTime is the timestamp indicating when the span began, with microseconds precision.
|
||||
startTime time.Time
|
||||
|
||||
// duration is the duration of the span with microseconds precision.
|
||||
// Zero value means duration is unknown.
|
||||
duration time.Duration
|
||||
|
||||
// tags attached to this span
|
||||
tags []Tag
|
||||
|
||||
// The span's "micro-log"
|
||||
logs []opentracing.LogRecord
|
||||
|
||||
// references for this span
|
||||
references []Reference
|
||||
|
||||
observer ContribSpanObserver
|
||||
}
|
||||
|
||||
// Tag is a simple key value wrapper.
|
||||
// TODO deprecate in the next major release, use opentracing.Tag instead.
|
||||
type Tag struct {
|
||||
key string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// SetOperationName sets or changes the operation name.
|
||||
func (s *Span) SetOperationName(operationName string) opentracing.Span {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.context.IsSampled() {
|
||||
s.operationName = operationName
|
||||
}
|
||||
s.observer.OnSetOperationName(operationName)
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTag implements SetTag() of opentracing.Span
|
||||
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
|
||||
s.observer.OnSetTag(key, value)
|
||||
if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
|
||||
return s
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.context.IsSampled() {
|
||||
s.setTagNoLocking(key, value)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Span) setTagNoLocking(key string, value interface{}) {
|
||||
s.tags = append(s.tags, Tag{key: key, value: value})
|
||||
}
|
||||
|
||||
// LogFields implements opentracing.Span API
|
||||
func (s *Span) LogFields(fields ...log.Field) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if !s.context.IsSampled() {
|
||||
return
|
||||
}
|
||||
s.logFieldsNoLocking(fields...)
|
||||
}
|
||||
|
||||
// this function should only be called while holding a Write lock
|
||||
func (s *Span) logFieldsNoLocking(fields ...log.Field) {
|
||||
lr := opentracing.LogRecord{
|
||||
Fields: fields,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
s.appendLog(lr)
|
||||
}
|
||||
|
||||
// LogKV implements opentracing.Span API
|
||||
func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
|
||||
s.RLock()
|
||||
sampled := s.context.IsSampled()
|
||||
s.RUnlock()
|
||||
if !sampled {
|
||||
return
|
||||
}
|
||||
fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
|
||||
if err != nil {
|
||||
s.LogFields(log.Error(err), log.String("function", "LogKV"))
|
||||
return
|
||||
}
|
||||
s.LogFields(fields...)
|
||||
}
|
||||
|
||||
// LogEvent implements opentracing.Span API
|
||||
func (s *Span) LogEvent(event string) {
|
||||
s.Log(opentracing.LogData{Event: event})
|
||||
}
|
||||
|
||||
// LogEventWithPayload implements opentracing.Span API
|
||||
func (s *Span) LogEventWithPayload(event string, payload interface{}) {
|
||||
s.Log(opentracing.LogData{Event: event, Payload: payload})
|
||||
}
|
||||
|
||||
// Log implements opentracing.Span API
|
||||
func (s *Span) Log(ld opentracing.LogData) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.context.IsSampled() {
|
||||
if ld.Timestamp.IsZero() {
|
||||
ld.Timestamp = s.tracer.timeNow()
|
||||
}
|
||||
s.appendLog(ld.ToLogRecord())
|
||||
}
|
||||
}
|
||||
|
||||
// this function should only be called while holding a Write lock
|
||||
func (s *Span) appendLog(lr opentracing.LogRecord) {
|
||||
// TODO add logic to limit number of logs per span (issue #46)
|
||||
s.logs = append(s.logs, lr)
|
||||
}
|
||||
|
||||
// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
|
||||
func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.tracer.setBaggage(s, key, value)
|
||||
return s
|
||||
}
|
||||
|
||||
// BaggageItem implements BaggageItem() of opentracing.SpanContext
|
||||
func (s *Span) BaggageItem(key string) string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.context.baggage[key]
|
||||
}
|
||||
|
||||
// Finish implements opentracing.Span API
|
||||
func (s *Span) Finish() {
|
||||
s.FinishWithOptions(opentracing.FinishOptions{})
|
||||
}
|
||||
|
||||
// FinishWithOptions implements opentracing.Span API
|
||||
func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
|
||||
if options.FinishTime.IsZero() {
|
||||
options.FinishTime = s.tracer.timeNow()
|
||||
}
|
||||
s.observer.OnFinish(options)
|
||||
s.Lock()
|
||||
if s.context.IsSampled() {
|
||||
s.duration = options.FinishTime.Sub(s.startTime)
|
||||
// Note: bulk logs are not subject to maxLogsPerSpan limit
|
||||
if options.LogRecords != nil {
|
||||
s.logs = append(s.logs, options.LogRecords...)
|
||||
}
|
||||
for _, ld := range options.BulkLogData {
|
||||
s.logs = append(s.logs, ld.ToLogRecord())
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
// call reportSpan even for non-sampled traces, to return span to the pool
|
||||
s.tracer.reportSpan(s)
|
||||
}
|
||||
|
||||
// Context implements opentracing.Span API
|
||||
func (s *Span) Context() opentracing.SpanContext {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.context
|
||||
}
|
||||
|
||||
// Tracer implements opentracing.Span API
|
||||
func (s *Span) Tracer() opentracing.Tracer {
|
||||
return s.tracer
|
||||
}
|
||||
|
||||
func (s *Span) String() string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.context.String()
|
||||
}
|
||||
|
||||
// OperationName allows retrieving current operation name.
|
||||
func (s *Span) OperationName() string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.operationName
|
||||
}
|
||||
|
||||
func (s *Span) serviceName() string {
|
||||
return s.tracer.serviceName
|
||||
}
|
||||
|
||||
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
|
||||
func setSamplingPriority(s *Span, value interface{}) bool {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
val, ok := value.(uint16)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if val == 0 {
|
||||
s.context.flags = s.context.flags & (^flagSampled)
|
||||
return true
|
||||
}
|
||||
if s.tracer.isDebugAllowed(s.operationName) {
|
||||
s.context.flags = s.context.flags | flagDebug | flagSampled
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
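// Editor's sketch (not part of the vendored file): callers drive the flag changes
// above through the standard opentracing sampling.priority tag helper.
func exampleForceSampling(span opentracing.Span) {
	ext.SamplingPriority.Set(span, 1) // sets the debug+sampled flags, if the tracer allows it
	ext.SamplingPriority.Set(span, 0) // clears the sampled flag
}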
|
|
@@ -1,411 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
|
||||
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
var _ = jaeger.GoUnusedProtection__
|
||||
var _ = zipkincore.GoUnusedProtection__
|
||||
|
||||
type Agent interface {
|
||||
// Parameters:
|
||||
// - Spans
|
||||
EmitZipkinBatch(spans []*zipkincore.Span) (err error)
|
||||
// Parameters:
|
||||
// - Batch
|
||||
EmitBatch(batch *jaeger.Batch) (err error)
|
||||
}
|
||||
|
||||
type AgentClient struct {
|
||||
Transport thrift.TTransport
|
||||
ProtocolFactory thrift.TProtocolFactory
|
||||
InputProtocol thrift.TProtocol
|
||||
OutputProtocol thrift.TProtocol
|
||||
SeqId int32
|
||||
}
|
||||
|
||||
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
|
||||
return &AgentClient{Transport: t,
|
||||
ProtocolFactory: f,
|
||||
InputProtocol: f.GetProtocol(t),
|
||||
OutputProtocol: f.GetProtocol(t),
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
|
||||
return &AgentClient{Transport: t,
|
||||
ProtocolFactory: nil,
|
||||
InputProtocol: iprot,
|
||||
OutputProtocol: oprot,
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
// - Spans
|
||||
func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
|
||||
if err = p.sendEmitZipkinBatch(spans); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
|
||||
oprot := p.OutputProtocol
|
||||
if oprot == nil {
|
||||
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.OutputProtocol = oprot
|
||||
}
|
||||
p.SeqId++
|
||||
if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
|
||||
return
|
||||
}
|
||||
args := AgentEmitZipkinBatchArgs{
|
||||
Spans: spans,
|
||||
}
|
||||
if err = args.Write(oprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = oprot.WriteMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
return oprot.Flush()
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
// - Batch
|
||||
func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
|
||||
if err = p.sendEmitBatch(batch); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
|
||||
oprot := p.OutputProtocol
|
||||
if oprot == nil {
|
||||
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.OutputProtocol = oprot
|
||||
}
|
||||
p.SeqId++
|
||||
if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
|
||||
return
|
||||
}
|
||||
args := AgentEmitBatchArgs{
|
||||
Batch: batch,
|
||||
}
|
||||
if err = args.Write(oprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = oprot.WriteMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
return oprot.Flush()
|
||||
}
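// Editor's sketch (not part of the generated file): emitBatch is a one-way Thrift
// call, so the client only writes and flushes the request and never waits for a
// reply. The transport/protocol arguments are assumed to be provided by the caller.
func exampleEmitBatch(trans thrift.TTransport, factory thrift.TProtocolFactory, batch *jaeger.Batch) error {
	client := NewAgentClientFactory(trans, factory)
	return client.EmitBatch(batch) // sends "emitBatch" as thrift.ONEWAY and flushes
}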
|
||||
|
||||
type AgentProcessor struct {
|
||||
processorMap map[string]thrift.TProcessorFunction
|
||||
handler Agent
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
|
||||
p.processorMap[key] = processor
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
|
||||
processor, ok = p.processorMap[key]
|
||||
return processor, ok
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
|
||||
return p.processorMap
|
||||
}
|
||||
|
||||
func NewAgentProcessor(handler Agent) *AgentProcessor {
|
||||
|
||||
self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
|
||||
self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
|
||||
self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
|
||||
return self0
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
name, _, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if processor, ok := p.GetProcessorFunction(name); ok {
|
||||
return processor.Process(seqId, iprot, oprot)
|
||||
}
|
||||
iprot.Skip(thrift.STRUCT)
|
||||
iprot.ReadMessageEnd()
|
||||
x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
|
||||
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
|
||||
x1.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, x1
|
||||
|
||||
}
|
||||
|
||||
type agentProcessorEmitZipkinBatch struct {
|
||||
handler Agent
|
||||
}
|
||||
|
||||
func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
args := AgentEmitZipkinBatchArgs{}
|
||||
if err = args.Read(iprot); err != nil {
|
||||
iprot.ReadMessageEnd()
|
||||
return false, err
|
||||
}
|
||||
|
||||
iprot.ReadMessageEnd()
|
||||
var err2 error
|
||||
if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
|
||||
return true, err2
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type agentProcessorEmitBatch struct {
|
||||
handler Agent
|
||||
}
|
||||
|
||||
func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
args := AgentEmitBatchArgs{}
|
||||
if err = args.Read(iprot); err != nil {
|
||||
iprot.ReadMessageEnd()
|
||||
return false, err
|
||||
}
|
||||
|
||||
iprot.ReadMessageEnd()
|
||||
var err2 error
|
||||
if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
|
||||
return true, err2
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// HELPER FUNCTIONS AND STRUCTURES
|
||||
|
||||
// Attributes:
|
||||
// - Spans
|
||||
type AgentEmitZipkinBatchArgs struct {
|
||||
Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
|
||||
}
|
||||
|
||||
func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
|
||||
return &AgentEmitZipkinBatchArgs{}
|
||||
}
|
||||
|
||||
func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
|
||||
return p.Spans
|
||||
}
|
||||
func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
|
||||
_, size, err := iprot.ReadListBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError("error reading list begin: ", err)
|
||||
}
|
||||
tSlice := make([]*zipkincore.Span, 0, size)
|
||||
p.Spans = tSlice
|
||||
	for i := 0; i < size; i++ {
		_elem2 := &zipkincore.Span{}
		if err := _elem2.Read(iprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
		}
		p.Spans = append(p.Spans, _elem2)
	}
	if err := iprot.ReadListEnd(); err != nil {
		return thrift.PrependError("error reading list end: ", err)
	}
	return nil
}

func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
	}
	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
		return thrift.PrependError("error writing list begin: ", err)
	}
	for _, v := range p.Spans {
		if err := v.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
		}
	}
	if err := oprot.WriteListEnd(); err != nil {
		return thrift.PrependError("error writing list end: ", err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
	}
	return err
}

func (p *AgentEmitZipkinBatchArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
}

// Attributes:
//  - Batch
type AgentEmitBatchArgs struct {
	Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
}

func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
	return &AgentEmitBatchArgs{}
}

var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch

func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
	if !p.IsSetBatch() {
		return AgentEmitBatchArgs_Batch_DEFAULT
	}
	return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
	return p.Batch != nil
}

func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
	p.Batch = &jaeger.Batch{}
	if err := p.Batch.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
	}
	return nil
}

func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
	}
	if err := p.Batch.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
	}
	return err
}

func (p *AgentEmitBatchArgs) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}
@@ -1,23 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package agent

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__

func init() {
}
@@ -1,21 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package agent

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
var GoUnusedProtection__ int
435 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go generated vendored
@@ -1,435 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package baggage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
type BaggageRestrictionManager interface {
|
||||
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
|
||||
// Usually, baggageRestrictions apply to all services however there may be situations
|
||||
// where a baggageKey might only be allowed to be set by a specific service.
|
||||
//
|
||||
// Parameters:
|
||||
// - ServiceName
|
||||
GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
|
||||
}
|
||||
|
||||
type BaggageRestrictionManagerClient struct {
|
||||
Transport thrift.TTransport
|
||||
ProtocolFactory thrift.TProtocolFactory
|
||||
InputProtocol thrift.TProtocol
|
||||
OutputProtocol thrift.TProtocol
|
||||
SeqId int32
|
||||
}
|
||||
|
||||
func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
|
||||
return &BaggageRestrictionManagerClient{Transport: t,
|
||||
ProtocolFactory: f,
|
||||
InputProtocol: f.GetProtocol(t),
|
||||
OutputProtocol: f.GetProtocol(t),
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
|
||||
return &BaggageRestrictionManagerClient{Transport: t,
|
||||
ProtocolFactory: nil,
|
||||
InputProtocol: iprot,
|
||||
OutputProtocol: oprot,
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
|
||||
// Usually, baggageRestrictions apply to all services however there may be situations
|
||||
// where a baggageKey might only be allowed to be set by a specific service.
|
||||
//
|
||||
// Parameters:
|
||||
// - ServiceName
|
||||
func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
|
||||
if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
|
||||
return
|
||||
}
|
||||
return p.recvGetBaggageRestrictions()
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
|
||||
oprot := p.OutputProtocol
|
||||
if oprot == nil {
|
||||
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.OutputProtocol = oprot
|
||||
}
|
||||
p.SeqId++
|
||||
if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
|
||||
return
|
||||
}
|
||||
args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
|
||||
ServiceName: serviceName,
|
||||
}
|
||||
if err = args.Write(oprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = oprot.WriteMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
return oprot.Flush()
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
|
||||
iprot := p.InputProtocol
|
||||
if iprot == nil {
|
||||
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.InputProtocol = iprot
|
||||
}
|
||||
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if method != "getBaggageRestrictions" {
|
||||
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
|
||||
return
|
||||
}
|
||||
if p.SeqId != seqId {
|
||||
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
|
||||
return
|
||||
}
|
||||
if mTypeId == thrift.EXCEPTION {
|
||||
error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
|
||||
var error1 error
|
||||
error1, err = error0.Read(iprot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = iprot.ReadMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
err = error1
|
||||
return
|
||||
}
|
||||
if mTypeId != thrift.REPLY {
|
||||
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
|
||||
return
|
||||
}
|
||||
result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
|
||||
if err = result.Read(iprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = iprot.ReadMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
value = result.GetSuccess()
|
||||
return
|
||||
}
|
||||
|
||||
type BaggageRestrictionManagerProcessor struct {
|
||||
processorMap map[string]thrift.TProcessorFunction
|
||||
handler BaggageRestrictionManager
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
|
||||
p.processorMap[key] = processor
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
|
||||
processor, ok = p.processorMap[key]
|
||||
return processor, ok
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
|
||||
return p.processorMap
|
||||
}
|
||||
|
||||
func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
|
||||
|
||||
self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
|
||||
self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
|
||||
return self2
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
name, _, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if processor, ok := p.GetProcessorFunction(name); ok {
|
||||
return processor.Process(seqId, iprot, oprot)
|
||||
}
|
||||
iprot.Skip(thrift.STRUCT)
|
||||
iprot.ReadMessageEnd()
|
||||
x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
|
||||
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
|
||||
x3.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, x3
|
||||
|
||||
}
|
||||
|
||||
type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
|
||||
handler BaggageRestrictionManager
|
||||
}
|
||||
|
||||
func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
|
||||
if err = args.Read(iprot); err != nil {
|
||||
iprot.ReadMessageEnd()
|
||||
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
|
||||
oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
|
||||
x.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, err
|
||||
}
|
||||
|
||||
iprot.ReadMessageEnd()
|
||||
result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
|
||||
var retval []*BaggageRestriction
|
||||
var err2 error
|
||||
if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
|
||||
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
|
||||
oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
|
||||
x.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return true, err2
|
||||
} else {
|
||||
result.Success = retval
|
||||
}
|
||||
if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = result.Write(oprot); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = oprot.Flush(); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// HELPER FUNCTIONS AND STRUCTURES
|
||||
|
||||
// Attributes:
|
||||
// - ServiceName
|
||||
type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
|
||||
ServiceName string `thrift:"serviceName,1" json:"serviceName"`
|
||||
}
|
||||
|
||||
func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
|
||||
return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
|
||||
return p.ServiceName
|
||||
}
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
|
||||
if v, err := iprot.ReadString(); err != nil {
|
||||
return thrift.PrependError("error reading field 1: ", err)
|
||||
} else {
|
||||
p.ServiceName = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField1(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteString(string(p.ServiceName)); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
|
||||
}
|
||||
|
||||
// Attributes:
|
||||
// - Success
|
||||
type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
|
||||
Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
|
||||
}
|
||||
|
||||
func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
|
||||
return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
|
||||
}
|
||||
|
||||
var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
|
||||
return p.Success
|
||||
}
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
|
||||
return p.Success != nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 0:
|
||||
if err := p.readField0(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
|
||||
_, size, err := iprot.ReadListBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError("error reading list begin: ", err)
|
||||
}
|
||||
tSlice := make([]*BaggageRestriction, 0, size)
|
||||
p.Success = tSlice
|
||||
for i := 0; i < size; i++ {
|
||||
_elem4 := &BaggageRestriction{}
|
||||
if err := _elem4.Read(iprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
|
||||
}
|
||||
p.Success = append(p.Success, _elem4)
|
||||
}
|
||||
if err := iprot.ReadListEnd(); err != nil {
|
||||
return thrift.PrependError("error reading list end: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField0(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
|
||||
if p.IsSetSuccess() {
|
||||
if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
|
||||
return thrift.PrependError("error writing list begin: ", err)
|
||||
}
|
||||
for _, v := range p.Success {
|
||||
if err := v.Write(oprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
|
||||
}
|
||||
}
|
||||
if err := oprot.WriteListEnd(); err != nil {
|
||||
return thrift.PrependError("error writing list end: ", err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
|
||||
}
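
For reference, a minimal sketch of how the generated BaggageRestrictionManagerClient deleted above was typically driven, assuming the caller already holds an open thrift.TTransport and a thrift.TProtocolFactory; the package main wrapper and helper name are hypothetical, not part of the vendored code:

package main // hypothetical example, not part of the repository

import (
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

// fetchRestrictions constructs the generated client from a transport and
// protocol factory, then issues the getBaggageRestrictions call shown above.
func fetchRestrictions(t thrift.TTransport, f thrift.TProtocolFactory, service string) ([]*baggage.BaggageRestriction, error) {
	client := baggage.NewBaggageRestrictionManagerClientFactory(t, f)
	return client.GetBaggageRestrictions(service)
}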
|
|
@@ -1,18 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package baggage

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

func init() {
}
@@ -1,154 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package baggage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
var GoUnusedProtection__ int
|
||||
|
||||
// Attributes:
|
||||
// - BaggageKey
|
||||
// - MaxValueLength
|
||||
type BaggageRestriction struct {
|
||||
BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"`
|
||||
MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"`
|
||||
}
|
||||
|
||||
func NewBaggageRestriction() *BaggageRestriction {
|
||||
return &BaggageRestriction{}
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) GetBaggageKey() string {
|
||||
return p.BaggageKey
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) GetMaxValueLength() int32 {
|
||||
return p.MaxValueLength
|
||||
}
|
||||
func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
var issetBaggageKey bool = false
|
||||
var issetMaxValueLength bool = false
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
issetBaggageKey = true
|
||||
case 2:
|
||||
if err := p.readField2(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
issetMaxValueLength = true
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
if !issetBaggageKey {
|
||||
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
|
||||
}
|
||||
if !issetMaxValueLength {
|
||||
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
|
||||
if v, err := iprot.ReadString(); err != nil {
|
||||
return thrift.PrependError("error reading field 1: ", err)
|
||||
} else {
|
||||
p.BaggageKey = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
|
||||
if v, err := iprot.ReadI32(); err != nil {
|
||||
return thrift.PrependError("error reading field 2: ", err)
|
||||
} else {
|
||||
p.MaxValueLength = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField1(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.writeField2(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BaggageRestriction) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("BaggageRestriction(%+v)", *p)
|
||||
}
|
|
@ -1,242 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
type Agent interface {
|
||||
// Parameters:
|
||||
// - Batch
|
||||
EmitBatch(batch *Batch) (err error)
|
||||
}
|
||||
|
||||
type AgentClient struct {
|
||||
Transport thrift.TTransport
|
||||
ProtocolFactory thrift.TProtocolFactory
|
||||
InputProtocol thrift.TProtocol
|
||||
OutputProtocol thrift.TProtocol
|
||||
SeqId int32
|
||||
}
|
||||
|
||||
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
|
||||
return &AgentClient{Transport: t,
|
||||
ProtocolFactory: f,
|
||||
InputProtocol: f.GetProtocol(t),
|
||||
OutputProtocol: f.GetProtocol(t),
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
|
||||
return &AgentClient{Transport: t,
|
||||
ProtocolFactory: nil,
|
||||
InputProtocol: iprot,
|
||||
OutputProtocol: oprot,
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
// - Batch
|
||||
func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
|
||||
if err = p.sendEmitBatch(batch); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
|
||||
oprot := p.OutputProtocol
|
||||
if oprot == nil {
|
||||
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.OutputProtocol = oprot
|
||||
}
|
||||
p.SeqId++
|
||||
if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
|
||||
return
|
||||
}
|
||||
args := AgentEmitBatchArgs{
|
||||
Batch: batch,
|
||||
}
|
||||
if err = args.Write(oprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = oprot.WriteMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
return oprot.Flush()
|
||||
}
|
||||
|
||||
type AgentProcessor struct {
|
||||
processorMap map[string]thrift.TProcessorFunction
|
||||
handler Agent
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
|
||||
p.processorMap[key] = processor
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
|
||||
processor, ok = p.processorMap[key]
|
||||
return processor, ok
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
|
||||
return p.processorMap
|
||||
}
|
||||
|
||||
func NewAgentProcessor(handler Agent) *AgentProcessor {
|
||||
|
||||
self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
|
||||
self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
|
||||
return self6
|
||||
}
|
||||
|
||||
func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
name, _, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if processor, ok := p.GetProcessorFunction(name); ok {
|
||||
return processor.Process(seqId, iprot, oprot)
|
||||
}
|
||||
iprot.Skip(thrift.STRUCT)
|
||||
iprot.ReadMessageEnd()
|
||||
x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
|
||||
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
|
||||
x7.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, x7
|
||||
|
||||
}
|
||||
|
||||
type agentProcessorEmitBatch struct {
|
||||
handler Agent
|
||||
}
|
||||
|
||||
func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
args := AgentEmitBatchArgs{}
|
||||
if err = args.Read(iprot); err != nil {
|
||||
iprot.ReadMessageEnd()
|
||||
return false, err
|
||||
}
|
||||
|
||||
iprot.ReadMessageEnd()
|
||||
var err2 error
|
||||
if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
|
||||
return true, err2
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// HELPER FUNCTIONS AND STRUCTURES
|
||||
|
||||
// Attributes:
|
||||
// - Batch
|
||||
type AgentEmitBatchArgs struct {
|
||||
Batch *Batch `thrift:"batch,1" json:"batch"`
|
||||
}
|
||||
|
||||
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
|
||||
return &AgentEmitBatchArgs{}
|
||||
}
|
||||
|
||||
var AgentEmitBatchArgs_Batch_DEFAULT *Batch
|
||||
|
||||
func (p *AgentEmitBatchArgs) GetBatch() *Batch {
|
||||
if !p.IsSetBatch() {
|
||||
return AgentEmitBatchArgs_Batch_DEFAULT
|
||||
}
|
||||
return p.Batch
|
||||
}
|
||||
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
|
||||
return p.Batch != nil
|
||||
}
|
||||
|
||||
func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
|
||||
p.Batch = &Batch{}
|
||||
if err := p.Batch.Read(iprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField1(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
|
||||
}
|
||||
if err := p.Batch.Write(oprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *AgentEmitBatchArgs) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
|
||||
}
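
For reference, a minimal sketch of the generated AgentClient deleted above, again assuming an already-open thrift.TTransport and a thrift.TProtocolFactory; the wrapper is hypothetical. Because emitBatch is written with thrift.ONEWAY, EmitBatch only serializes and flushes the message and reads no reply:

package main // hypothetical example, not part of the repository

import (
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// emitOneBatch builds the generated client and fires a single one-way
// emitBatch call; any error comes from serialization or the transport flush.
func emitOneBatch(t thrift.TTransport, f thrift.TProtocolFactory, batch *jaeger.Batch) error {
	client := jaeger.NewAgentClientFactory(t, f)
	return client.EmitBatch(batch)
}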
|
|
@@ -1,18 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package jaeger

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

func init() {
}
File diff suppressed because it is too large
@@ -1,18 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package sampling

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

func init() {
}
410 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go generated vendored
@@ -1,410 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package sampling
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
type SamplingManager interface {
|
||||
// Parameters:
|
||||
// - ServiceName
|
||||
GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
|
||||
}
|
||||
|
||||
type SamplingManagerClient struct {
|
||||
Transport thrift.TTransport
|
||||
ProtocolFactory thrift.TProtocolFactory
|
||||
InputProtocol thrift.TProtocol
|
||||
OutputProtocol thrift.TProtocol
|
||||
SeqId int32
|
||||
}
|
||||
|
||||
func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
|
||||
return &SamplingManagerClient{Transport: t,
|
||||
ProtocolFactory: f,
|
||||
InputProtocol: f.GetProtocol(t),
|
||||
OutputProtocol: f.GetProtocol(t),
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
|
||||
return &SamplingManagerClient{Transport: t,
|
||||
ProtocolFactory: nil,
|
||||
InputProtocol: iprot,
|
||||
OutputProtocol: oprot,
|
||||
SeqId: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
// - ServiceName
|
||||
func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
|
||||
if err = p.sendGetSamplingStrategy(serviceName); err != nil {
|
||||
return
|
||||
}
|
||||
return p.recvGetSamplingStrategy()
|
||||
}
|
||||
|
||||
func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
|
||||
oprot := p.OutputProtocol
|
||||
if oprot == nil {
|
||||
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.OutputProtocol = oprot
|
||||
}
|
||||
p.SeqId++
|
||||
if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
|
||||
return
|
||||
}
|
||||
args := SamplingManagerGetSamplingStrategyArgs{
|
||||
ServiceName: serviceName,
|
||||
}
|
||||
if err = args.Write(oprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = oprot.WriteMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
return oprot.Flush()
|
||||
}
|
||||
|
||||
func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
|
||||
iprot := p.InputProtocol
|
||||
if iprot == nil {
|
||||
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
|
||||
p.InputProtocol = iprot
|
||||
}
|
||||
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if method != "getSamplingStrategy" {
|
||||
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
|
||||
return
|
||||
}
|
||||
if p.SeqId != seqId {
|
||||
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
|
||||
return
|
||||
}
|
||||
if mTypeId == thrift.EXCEPTION {
|
||||
error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
|
||||
var error2 error
|
||||
error2, err = error1.Read(iprot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = iprot.ReadMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
err = error2
|
||||
return
|
||||
}
|
||||
if mTypeId != thrift.REPLY {
|
||||
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
|
||||
return
|
||||
}
|
||||
result := SamplingManagerGetSamplingStrategyResult{}
|
||||
if err = result.Read(iprot); err != nil {
|
||||
return
|
||||
}
|
||||
if err = iprot.ReadMessageEnd(); err != nil {
|
||||
return
|
||||
}
|
||||
value = result.GetSuccess()
|
||||
return
|
||||
}
|
||||
|
||||
type SamplingManagerProcessor struct {
|
||||
processorMap map[string]thrift.TProcessorFunction
|
||||
handler SamplingManager
|
||||
}
|
||||
|
||||
func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
|
||||
p.processorMap[key] = processor
|
||||
}
|
||||
|
||||
func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
|
||||
processor, ok = p.processorMap[key]
|
||||
return processor, ok
|
||||
}
|
||||
|
||||
func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
|
||||
return p.processorMap
|
||||
}
|
||||
|
||||
func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
|
||||
|
||||
self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
|
||||
self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
|
||||
return self3
|
||||
}
|
||||
|
||||
func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
name, _, seqId, err := iprot.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if processor, ok := p.GetProcessorFunction(name); ok {
|
||||
return processor.Process(seqId, iprot, oprot)
|
||||
}
|
||||
iprot.Skip(thrift.STRUCT)
|
||||
iprot.ReadMessageEnd()
|
||||
x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
|
||||
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
|
||||
x4.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, x4
|
||||
|
||||
}
|
||||
|
||||
type samplingManagerProcessorGetSamplingStrategy struct {
|
||||
handler SamplingManager
|
||||
}
|
||||
|
||||
func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
|
||||
args := SamplingManagerGetSamplingStrategyArgs{}
|
||||
if err = args.Read(iprot); err != nil {
|
||||
iprot.ReadMessageEnd()
|
||||
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
|
||||
oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
|
||||
x.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return false, err
|
||||
}
|
||||
|
||||
iprot.ReadMessageEnd()
|
||||
result := SamplingManagerGetSamplingStrategyResult{}
|
||||
var retval *SamplingStrategyResponse
|
||||
var err2 error
|
||||
if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
|
||||
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
|
||||
oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
|
||||
x.Write(oprot)
|
||||
oprot.WriteMessageEnd()
|
||||
oprot.Flush()
|
||||
return true, err2
|
||||
} else {
|
||||
result.Success = retval
|
||||
}
|
||||
if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = result.Write(oprot); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 = oprot.Flush(); err == nil && err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// HELPER FUNCTIONS AND STRUCTURES
|
||||
|
||||
// Attributes:
|
||||
// - ServiceName
|
||||
type SamplingManagerGetSamplingStrategyArgs struct {
|
||||
ServiceName string `thrift:"serviceName,1" json:"serviceName"`
|
||||
}
|
||||
|
||||
func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
|
||||
return &SamplingManagerGetSamplingStrategyArgs{}
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
|
||||
return p.ServiceName
|
||||
}
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
|
||||
if v, err := iprot.ReadString(); err != nil {
|
||||
return thrift.PrependError("error reading field 1: ", err)
|
||||
} else {
|
||||
p.ServiceName = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField1(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteString(string(p.ServiceName)); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
|
||||
}
|
||||
|
||||
// Attributes:
|
||||
// - Success
|
||||
type SamplingManagerGetSamplingStrategyResult struct {
|
||||
Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
|
||||
}
|
||||
|
||||
func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
|
||||
return &SamplingManagerGetSamplingStrategyResult{}
|
||||
}
|
||||
|
||||
var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
|
||||
if !p.IsSetSuccess() {
|
||||
return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
|
||||
}
|
||||
return p.Success
|
||||
}
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
|
||||
return p.Success != nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 0:
|
||||
if err := p.readField0(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
|
||||
p.Success = &SamplingStrategyResponse{}
|
||||
if err := p.Success.Read(iprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField0(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
|
||||
if p.IsSetSuccess() {
|
||||
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
|
||||
}
|
||||
if err := p.Success.Write(oprot); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *SamplingManagerGetSamplingStrategyResult) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
|
||||
}
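
For reference, a minimal sketch of the generated SamplingManagerClient deleted above, assuming the caller supplies the transport and protocol factory; the wrapper is hypothetical. getSamplingStrategy is a normal request/response call, so the client writes the args and then reads back a SamplingStrategyResponse:

package main // hypothetical example, not part of the repository

import (
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// fetchStrategy asks the agent which sampling strategy to apply for a service,
// using only the constructor and method generated in the file above.
func fetchStrategy(t thrift.TTransport, f thrift.TProtocolFactory, service string) (*sampling.SamplingStrategyResponse, error) {
	client := sampling.NewSamplingManagerClientFactory(t, f)
	return client.GetSamplingStrategy(service)
}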
|
|
@@ -1,873 +0,0 @@
|
|||
// Autogenerated by Thrift Compiler (0.9.3)
|
||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||
|
||||
package sampling
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/uber/jaeger-client-go/thrift"
|
||||
)
|
||||
|
||||
// (needed to ensure safety because of naive import list construction.)
|
||||
var _ = thrift.ZERO
|
||||
var _ = fmt.Printf
|
||||
var _ = bytes.Equal
|
||||
|
||||
var GoUnusedProtection__ int
|
||||
|
||||
type SamplingStrategyType int64
|
||||
|
||||
const (
|
||||
SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
|
||||
SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
|
||||
)
|
||||
|
||||
func (p SamplingStrategyType) String() string {
|
||||
switch p {
|
||||
case SamplingStrategyType_PROBABILISTIC:
|
||||
return "PROBABILISTIC"
|
||||
case SamplingStrategyType_RATE_LIMITING:
|
||||
return "RATE_LIMITING"
|
||||
}
|
||||
return "<UNSET>"
|
||||
}
|
||||
|
||||
func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
|
||||
switch s {
|
||||
case "PROBABILISTIC":
|
||||
return SamplingStrategyType_PROBABILISTIC, nil
|
||||
case "RATE_LIMITING":
|
||||
return SamplingStrategyType_RATE_LIMITING, nil
|
||||
}
|
||||
return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
|
||||
}
|
||||
|
||||
func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
|
||||
|
||||
func (p SamplingStrategyType) MarshalText() ([]byte, error) {
|
||||
return []byte(p.String()), nil
|
||||
}
|
||||
|
||||
func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
|
||||
q, err := SamplingStrategyTypeFromString(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*p = q
|
||||
return nil
|
||||
}
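
The enum above also generates text (un)marshalling helpers; a minimal round-trip sketch using only identifiers visible in this file (the example function itself is hypothetical):

package main // hypothetical example, not part of the repository

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// roundTrip renders the symbolic name with String and parses it back with
// SamplingStrategyTypeFromString, mirroring what MarshalText/UnmarshalText do.
func roundTrip() error {
	s := sampling.SamplingStrategyType_PROBABILISTIC.String() // "PROBABILISTIC"
	parsed, err := sampling.SamplingStrategyTypeFromString(s)
	if err != nil {
		return err
	}
	fmt.Println(parsed == sampling.SamplingStrategyType_PROBABILISTIC) // prints true
	return nil
}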
|
||||
|
||||
// Attributes:
|
||||
// - SamplingRate
|
||||
type ProbabilisticSamplingStrategy struct {
|
||||
SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
|
||||
}
|
||||
|
||||
func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
|
||||
return &ProbabilisticSamplingStrategy{}
|
||||
}
|
||||
|
||||
func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
|
||||
return p.SamplingRate
|
||||
}
|
||||
func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
|
||||
if _, err := iprot.ReadStructBegin(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
|
||||
}
|
||||
|
||||
var issetSamplingRate bool = false
|
||||
|
||||
for {
|
||||
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
|
||||
}
|
||||
if fieldTypeId == thrift.STOP {
|
||||
break
|
||||
}
|
||||
switch fieldId {
|
||||
case 1:
|
||||
if err := p.readField1(iprot); err != nil {
|
||||
return err
|
||||
}
|
||||
issetSamplingRate = true
|
||||
default:
|
||||
if err := iprot.Skip(fieldTypeId); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadFieldEnd(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := iprot.ReadStructEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
|
||||
}
|
||||
if !issetSamplingRate {
|
||||
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
|
||||
if v, err := iprot.ReadDouble(); err != nil {
|
||||
return thrift.PrependError("error reading field 1: ", err)
|
||||
} else {
|
||||
p.SamplingRate = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
|
||||
if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
|
||||
}
|
||||
if err := p.writeField1(oprot); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := oprot.WriteFieldStop(); err != nil {
|
||||
return thrift.PrependError("write field stop error: ", err)
|
||||
}
|
||||
if err := oprot.WriteStructEnd(); err != nil {
|
||||
return thrift.PrependError("write struct stop error: ", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
|
||||
if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
|
||||
}
|
||||
if err := oprot.WriteFieldEnd(); err != nil {
|
||||
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *ProbabilisticSamplingStrategy) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
|
||||
}
|
||||
|
// Attributes:
// - MaxTracesPerSecond
type RateLimitingSamplingStrategy struct {
	MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
}

func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
	return &RateLimitingSamplingStrategy{}
}

func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
	return p.MaxTracesPerSecond
}
func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	var issetMaxTracesPerSecond bool = false

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
			issetMaxTracesPerSecond = true
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	if !issetMaxTracesPerSecond {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
	}
	return nil
}

func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI16(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.MaxTracesPerSecond = v
	}
	return nil
}

func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
	}
	if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
	}
	return err
}

func (p *RateLimitingSamplingStrategy) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
}

// Attributes:
// - Operation
// - ProbabilisticSampling
type OperationSamplingStrategy struct {
	Operation             string                         `thrift:"operation,1,required" json:"operation"`
	ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
}

func NewOperationSamplingStrategy() *OperationSamplingStrategy {
	return &OperationSamplingStrategy{}
}

func (p *OperationSamplingStrategy) GetOperation() string {
	return p.Operation
}

var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy

func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
	if !p.IsSetProbabilisticSampling() {
		return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
	}
	return p.ProbabilisticSampling
}
func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
	return p.ProbabilisticSampling != nil
}

func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	var issetOperation bool = false
	var issetProbabilisticSampling bool = false

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
			issetOperation = true
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
			issetProbabilisticSampling = true
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	if !issetOperation {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
	}
	if !issetProbabilisticSampling {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
	}
	return nil
}

func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Operation = v
	}
	return nil
}

func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
	}
	return nil
}

func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
	}
	if err := oprot.WriteString(string(p.Operation)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
	}
	return err
}

func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
	}
	if err := p.ProbabilisticSampling.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
	}
	return err
}

func (p *OperationSamplingStrategy) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
}

// Attributes:
// - DefaultSamplingProbability
// - DefaultLowerBoundTracesPerSecond
// - PerOperationStrategies
// - DefaultUpperBoundTracesPerSecond
type PerOperationSamplingStrategies struct {
	DefaultSamplingProbability       float64                      `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
	DefaultLowerBoundTracesPerSecond float64                      `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
	PerOperationStrategies           []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
	DefaultUpperBoundTracesPerSecond *float64                     `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
}

func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
	return &PerOperationSamplingStrategies{}
}

func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
	return p.DefaultSamplingProbability
}

func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
	return p.DefaultLowerBoundTracesPerSecond
}

func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
	return p.PerOperationStrategies
}

var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64

func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
	if !p.IsSetDefaultUpperBoundTracesPerSecond() {
		return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
	}
	return *p.DefaultUpperBoundTracesPerSecond
}
func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
	return p.DefaultUpperBoundTracesPerSecond != nil
}

func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	var issetDefaultSamplingProbability bool = false
	var issetDefaultLowerBoundTracesPerSecond bool = false
	var issetPerOperationStrategies bool = false

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
			issetDefaultSamplingProbability = true
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
			issetDefaultLowerBoundTracesPerSecond = true
		case 3:
			if err := p.readField3(iprot); err != nil {
				return err
			}
			issetPerOperationStrategies = true
		case 4:
			if err := p.readField4(iprot); err != nil {
				return err
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	if !issetDefaultSamplingProbability {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
	}
	if !issetDefaultLowerBoundTracesPerSecond {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
	}
	if !issetPerOperationStrategies {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
	}
	return nil
}

func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadDouble(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.DefaultSamplingProbability = v
	}
	return nil
}

func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadDouble(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.DefaultLowerBoundTracesPerSecond = v
	}
	return nil
}

func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
	_, size, err := iprot.ReadListBegin()
	if err != nil {
		return thrift.PrependError("error reading list begin: ", err)
	}
	tSlice := make([]*OperationSamplingStrategy, 0, size)
	p.PerOperationStrategies = tSlice
	for i := 0; i < size; i++ {
		_elem0 := &OperationSamplingStrategy{}
		if err := _elem0.Read(iprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
		}
		p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
	}
	if err := iprot.ReadListEnd(); err != nil {
		return thrift.PrependError("error reading list end: ", err)
	}
	return nil
}

func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadDouble(); err != nil {
		return thrift.PrependError("error reading field 4: ", err)
	} else {
		p.DefaultUpperBoundTracesPerSecond = &v
	}
	return nil
}

func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := p.writeField3(oprot); err != nil {
		return err
	}
	if err := p.writeField4(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
	}
	if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
	}
	return err
}

func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
	}
	if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
	}
	return err
}

func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
	}
	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
		return thrift.PrependError("error writing list begin: ", err)
	}
	for _, v := range p.PerOperationStrategies {
		if err := v.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
		}
	}
	if err := oprot.WriteListEnd(); err != nil {
		return thrift.PrependError("error writing list end: ", err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
	}
	return err
}

func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
	if p.IsSetDefaultUpperBoundTracesPerSecond() {
		if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
		}
		if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
		}
	}
	return err
}

func (p *PerOperationSamplingStrategies) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
}

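DefaultUpperBoundTracesPerSecond is the only optional field in this struct: it is generated as a *float64 so that "unset" can be distinguished from 0, and the getter falls back to the package-level _DEFAULT value when the pointer is nil. The sketch below illustrates that behaviour; it is not part of the vendored file and assumes the generated file's package is named sampling.

package sampling // assumed package name; illustrative only

import "fmt"

// exampleDefaultUpperBound demonstrates the optional-field accessors generated
// above: IsSet reports whether the pointer is non-nil, and Get falls back to
// the package-level default when it is nil.
func exampleDefaultUpperBound() {
	p := NewPerOperationSamplingStrategies()
	fmt.Println(p.IsSetDefaultUpperBoundTracesPerSecond()) // false
	fmt.Println(p.GetDefaultUpperBoundTracesPerSecond())   // 0 (the _DEFAULT value)

	upper := 500.0
	p.DefaultUpperBoundTracesPerSecond = &upper
	fmt.Println(p.GetDefaultUpperBoundTracesPerSecond()) // 500
}
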
// Attributes:
// - StrategyType
// - ProbabilisticSampling
// - RateLimitingSampling
// - OperationSampling
type SamplingStrategyResponse struct {
	StrategyType          SamplingStrategyType            `thrift:"strategyType,1,required" json:"strategyType"`
	ProbabilisticSampling *ProbabilisticSamplingStrategy  `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
	RateLimitingSampling  *RateLimitingSamplingStrategy   `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
	OperationSampling     *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
}

func NewSamplingStrategyResponse() *SamplingStrategyResponse {
	return &SamplingStrategyResponse{}
}

func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
	return p.StrategyType
}

var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy

func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
	if !p.IsSetProbabilisticSampling() {
		return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
	}
	return p.ProbabilisticSampling
}

var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy

func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
	if !p.IsSetRateLimitingSampling() {
		return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
	}
	return p.RateLimitingSampling
}

var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies

func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
	if !p.IsSetOperationSampling() {
		return SamplingStrategyResponse_OperationSampling_DEFAULT
	}
	return p.OperationSampling
}
func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
	return p.ProbabilisticSampling != nil
}

func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
	return p.RateLimitingSampling != nil
}

func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
	return p.OperationSampling != nil
}

func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	var issetStrategyType bool = false

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if err := p.readField1(iprot); err != nil {
				return err
			}
			issetStrategyType = true
		case 2:
			if err := p.readField2(iprot); err != nil {
				return err
			}
		case 3:
			if err := p.readField3(iprot); err != nil {
				return err
			}
		case 4:
			if err := p.readField4(iprot); err != nil {
				return err
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	if !issetStrategyType {
		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
	}
	return nil
}

func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		temp := SamplingStrategyType(v)
		p.StrategyType = temp
	}
	return nil
}

func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
	}
	return nil
}

func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
	p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
	if err := p.RateLimitingSampling.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
	}
	return nil
}

func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
	p.OperationSampling = &PerOperationSamplingStrategies{}
	if err := p.OperationSampling.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
	}
	return nil
}

func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if err := p.writeField1(oprot); err != nil {
		return err
	}
	if err := p.writeField2(oprot); err != nil {
		return err
	}
	if err := p.writeField3(oprot); err != nil {
		return err
	}
	if err := p.writeField4(oprot); err != nil {
		return err
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
	}
	return err
}

func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
	if p.IsSetProbabilisticSampling() {
		if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
		}
		if err := p.ProbabilisticSampling.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
		}
	}
	return err
}

func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
	if p.IsSetRateLimitingSampling() {
		if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
		}
		if err := p.RateLimitingSampling.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
		}
	}
	return err
}

func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
	if p.IsSetOperationSampling() {
		if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
		}
		if err := p.OperationSampling.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
		}
	}
	return err
}

func (p *SamplingStrategyResponse) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
}

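Taken together, SamplingStrategyResponse acts as a tagged-union-style reply: StrategyType names the strategy that applies, the three optional struct pointers carry the payload, and the IsSetX/GetX pairs guard against nil. A short construction sketch follows; it is not part of the vendored file, and both the sampling package name and the SamplingStrategyType_PROBABILISTIC constant (defined in an earlier, unshown part of this generated file) are assumed.

package sampling // assumed package name; illustrative only

import "fmt"

// buildProbabilisticResponse assembles a response the way a sampling server
// might: set the discriminating StrategyType and fill in only the matching
// optional strategy struct.
func buildProbabilisticResponse(rate float64) *SamplingStrategyResponse {
	resp := NewSamplingStrategyResponse()
	resp.StrategyType = SamplingStrategyType_PROBABILISTIC // assumed enum constant
	resp.ProbabilisticSampling = &ProbabilisticSamplingStrategy{SamplingRate: rate}
	return resp
}

// printResponse reads the response back defensively, using the generated
// IsSet guards before dereferencing the optional structs.
func printResponse(resp *SamplingStrategyResponse) {
	if resp.IsSetProbabilisticSampling() {
		fmt.Println("probabilistic rate:", resp.GetProbabilisticSampling().SamplingRate)
	}
	if resp.IsSetRateLimitingSampling() {
		fmt.Println("rate limit:", resp.GetRateLimitingSampling().MaxTracesPerSecond)
	}
}
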
@@ -1,32 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

package zipkincore

import (
	"bytes"
	"fmt"
	"github.com/uber/jaeger-client-go/thrift"
)

// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal

const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
const CLIENT_RECV_FRAGMENT = "crf"
const SERVER_SEND_FRAGMENT = "ssf"
const SERVER_RECV_FRAGMENT = "srf"
const LOCAL_COMPONENT = "lc"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"

func init() {
}

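// The constant values above are the standard Zipkin core annotations:
// "cs"/"cr" mark client send/receive, "ss"/"sr" server send/receive,
// "ws"/"wr" wire send/receive, the "*_FRAGMENT" variants mark message
// fragments, LOCAL_COMPONENT ("lc") is the binary-annotation key for purely
// local operations, and CLIENT_ADDR/SERVER_ADDR ("ca"/"sa") carry the client
// and server endpoint addresses.
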
File diff suppressed because it is too large. Some files were not shown because too many files have changed in this diff.