vendor, internal/build: fix OpenBSD by bumping Azure libs (#17966)
* bump azure-storage-blob-go dependency to 0.3.0 release * update azure-storage-blob-go module import path * fix multiple return values on azblob.NewSharedKeyCredential * vendor: bump Azure libs to latest from upstream
This commit is contained in:
parent
4f56790efc
commit
57d9c93dcd
|
@ -22,7 +22,7 @@ import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AzureBlobstoreConfig is an authentication and configuration struct containing
|
// AzureBlobstoreConfig is an authentication and configuration struct containing
|
||||||
|
@ -45,7 +45,11 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Create an authenticated client against the Azure cloud
|
// Create an authenticated client against the Azure cloud
|
||||||
credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||||
|
|
||||||
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
||||||
|
@ -67,7 +71,11 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
|
||||||
|
|
||||||
// AzureBlobstoreList lists all the files contained within an azure blobstore.
|
// AzureBlobstoreList lists all the files contained within an azure blobstore.
|
||||||
func AzureBlobstoreList(config AzureBlobstoreConfig) ([]azblob.BlobItem, error) {
|
func AzureBlobstoreList(config AzureBlobstoreConfig) ([]azblob.BlobItem, error) {
|
||||||
credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||||
|
|
||||||
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
||||||
|
@ -95,7 +103,11 @@ func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []azblob.BlobItem)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Create an authenticated client against the Azure cloud
|
// Create an authenticated client against the Azure cloud
|
||||||
credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||||
|
|
||||||
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
|
||||||
|
|
|
@ -2,6 +2,7 @@ package pipeline
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"github.com/mattn/go-ieproxy"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
@ -204,7 +205,7 @@ func newDefaultHTTPClient() *http.Client {
|
||||||
// We want the Transport to have a large connection pool
|
// We want the Transport to have a large connection pool
|
||||||
return &http.Client{
|
return &http.Client{
|
||||||
Transport: &http.Transport{
|
Transport: &http.Transport{
|
||||||
Proxy: http.ProxyFromEnvironment,
|
Proxy: ieproxy.GetProxyFunc(),
|
||||||
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
|
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
|
||||||
Dial /*Context*/ : (&net.Dialer{
|
Dial /*Context*/ : (&net.Dialer{
|
||||||
Timeout: 30 * time.Second,
|
Timeout: 30 * time.Second,
|
||||||
|
@ -253,3 +254,31 @@ type methodFactoryMarker struct {
|
||||||
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
|
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
|
||||||
panic("methodFactoryMarker policy should have been replaced with a method policy")
|
panic("methodFactoryMarker policy should have been replaced with a method policy")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog
|
||||||
|
// By default no implemetation is provided here, because pipeline may be used in many different
|
||||||
|
// contexts, so the correct implementation is context-dependent
|
||||||
|
type LogSanitizer interface {
|
||||||
|
SanitizeLogMessage(raw string) string
|
||||||
|
}
|
||||||
|
|
||||||
|
var sanitizer LogSanitizer
|
||||||
|
var enableForceLog bool = true
|
||||||
|
|
||||||
|
// SetLogSanitizer can be called to supply a custom LogSanitizer.
|
||||||
|
// There is no threadsafety or locking on the underlying variable,
|
||||||
|
// so call this function just once at startup of your application
|
||||||
|
// (Don't later try to change the sanitizer on the fly).
|
||||||
|
func SetLogSanitizer(s LogSanitizer)(){
|
||||||
|
sanitizer = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetForceLogEnabled can be used to disable ForceLog
|
||||||
|
// There is no threadsafety or locking on the underlying variable,
|
||||||
|
// so call this function just once at startup of your application
|
||||||
|
// (Don't later try to change the setting on the fly).
|
||||||
|
func SetForceLogEnabled(enable bool)() {
|
||||||
|
enableForceLog = enable
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
package pipeline
|
||||||
|
|
||||||
|
|
||||||
|
// ForceLog should rarely be used. It forceable logs an entry to the
|
||||||
|
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||||
|
func ForceLog(level LogLevel, msg string) {
|
||||||
|
if !enableForceLog {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if sanitizer != nil {
|
||||||
|
msg = sanitizer.SanitizeLogMessage(msg)
|
||||||
|
}
|
||||||
|
forceLog(level, msg)
|
||||||
|
}
|
|
@ -7,9 +7,9 @@ import (
|
||||||
"log/syslog"
|
"log/syslog"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ForceLog should rarely be used. It forceable logs an entry to the
|
// forceLog should rarely be used. It forceable logs an entry to the
|
||||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||||
func ForceLog(level LogLevel, msg string) {
|
func forceLog(level LogLevel, msg string) {
|
||||||
if defaultLogger == nil {
|
if defaultLogger == nil {
|
||||||
return // Return fast if we failed to create the logger.
|
return // Return fast if we failed to create the logger.
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,9 +6,9 @@ import (
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ForceLog should rarely be used. It forceable logs an entry to the
|
// forceLog should rarely be used. It forceable logs an entry to the
|
||||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||||
func ForceLog(level LogLevel, msg string) {
|
func forceLog(level LogLevel, msg string) {
|
||||||
var el eventType
|
var el eventType
|
||||||
switch level {
|
switch level {
|
||||||
case LogError, LogFatal, LogPanic:
|
case LogError, LogFatal, LogPanic:
|
||||||
|
@ -35,7 +35,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
|
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
|
||||||
advAPI32 := syscall.MustLoadDLL("AdvAPI32.dll")
|
advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
|
||||||
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
|
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
|
||||||
|
|
||||||
sourceName, _ := os.Executable()
|
sourceName, _ := os.Executable()
|
||||||
|
|
|
@ -9,6 +9,23 @@ type causer interface {
|
||||||
Cause() error
|
Cause() error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func errorWithPC(msg string, pc uintptr) string {
|
||||||
|
s := ""
|
||||||
|
if fn := runtime.FuncForPC(pc); fn != nil {
|
||||||
|
file, line := fn.FileLine(pc)
|
||||||
|
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
|
||||||
|
}
|
||||||
|
s += msg + "\n\n"
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPC(callersToSkip int) uintptr {
|
||||||
|
// Get the PC of Initialize method's caller.
|
||||||
|
pc := [1]uintptr{}
|
||||||
|
_ = runtime.Callers(callersToSkip, pc[:])
|
||||||
|
return pc[0]
|
||||||
|
}
|
||||||
|
|
||||||
// ErrorNode can be an embedded field in a private error object. This field
|
// ErrorNode can be an embedded field in a private error object. This field
|
||||||
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
||||||
// When initializing a error type with this embedded field, initialize the
|
// When initializing a error type with this embedded field, initialize the
|
||||||
|
@ -22,12 +39,7 @@ type ErrorNode struct {
|
||||||
// When defining a new error type, have its Error method call this one passing
|
// When defining a new error type, have its Error method call this one passing
|
||||||
// it the string representation of the error.
|
// it the string representation of the error.
|
||||||
func (e *ErrorNode) Error(msg string) string {
|
func (e *ErrorNode) Error(msg string) string {
|
||||||
s := ""
|
s := errorWithPC(msg, e.pc)
|
||||||
if fn := runtime.FuncForPC(e.pc); fn != nil {
|
|
||||||
file, line := fn.FileLine(e.pc)
|
|
||||||
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
|
|
||||||
}
|
|
||||||
s += msg + "\n\n"
|
|
||||||
if e.cause != nil {
|
if e.cause != nil {
|
||||||
s += e.cause.Error() + "\n"
|
s += e.cause.Error() + "\n"
|
||||||
}
|
}
|
||||||
|
@ -83,10 +95,8 @@ func (e ErrorNode) Timeout() bool {
|
||||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
// value of 3 is very common; but, depending on your code nesting, you may need
|
||||||
// a different value.
|
// a different value.
|
||||||
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
|
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
|
||||||
// Get the PC of Initialize method's caller.
|
pc := getPC(callersToSkip)
|
||||||
pc := [1]uintptr{}
|
return ErrorNode{pc: pc, cause: cause}
|
||||||
_ = runtime.Callers(callersToSkip, pc[:])
|
|
||||||
return ErrorNode{pc: pc[0], cause: cause}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cause walks all the preceding errors and return the originating error.
|
// Cause walks all the preceding errors and return the originating error.
|
||||||
|
@ -101,14 +111,55 @@ func Cause(err error) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ErrorNodeNoCause can be an embedded field in a private error object. This field
|
||||||
|
// adds Program Counter support.
|
||||||
|
// When initializing a error type with this embedded field, initialize the
|
||||||
|
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
|
||||||
|
type ErrorNodeNoCause struct {
|
||||||
|
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||||
|
// When defining a new error type, have its Error method call this one passing
|
||||||
|
// it the string representation of the error.
|
||||||
|
func (e *ErrorNodeNoCause) Error(msg string) string {
|
||||||
|
return errorWithPC(msg, e.pc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporary returns true if the error occurred due to a temporary condition.
|
||||||
|
func (e ErrorNodeNoCause) Temporary() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timeout returns true if the error occurred due to time expiring.
|
||||||
|
func (e ErrorNodeNoCause) Timeout() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize is used to initialize an embedded ErrorNode field.
|
||||||
|
// It captures the caller's program counter.
|
||||||
|
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
|
||||||
|
// value of 3 is very common; but, depending on your code nesting, you may need
|
||||||
|
// a different value.
|
||||||
|
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
|
||||||
|
pc := getPC(callersToSkip)
|
||||||
|
return ErrorNodeNoCause{pc: pc}
|
||||||
|
}
|
||||||
|
|
||||||
// NewError creates a simple string error (like Error.New). But, this
|
// NewError creates a simple string error (like Error.New). But, this
|
||||||
// error also captures the caller's Program Counter and the preceding error.
|
// error also captures the caller's Program Counter and the preceding error (if provided).
|
||||||
func NewError(cause error, msg string) error {
|
func NewError(cause error, msg string) error {
|
||||||
|
if cause != nil {
|
||||||
return &pcError{
|
return &pcError{
|
||||||
ErrorNode: ErrorNode{}.Initialize(cause, 3),
|
ErrorNode: ErrorNode{}.Initialize(cause, 3),
|
||||||
msg: msg,
|
msg: msg,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return &pcErrorNoCause{
|
||||||
|
ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
|
||||||
|
msg: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// pcError is a simple string error (like error.New) with an ErrorNode (PC & cause).
|
// pcError is a simple string error (like error.New) with an ErrorNode (PC & cause).
|
||||||
type pcError struct {
|
type pcError struct {
|
||||||
|
@ -119,3 +170,12 @@ type pcError struct {
|
||||||
// Error satisfies the error interface. It shows the error with Program Counter
|
// Error satisfies the error interface. It shows the error with Program Counter
|
||||||
// symbols and calls Error on the preceding error so you can see the full error chain.
|
// symbols and calls Error on the preceding error so you can see the full error chain.
|
||||||
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
|
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
|
||||||
|
|
||||||
|
// pcErrorNoCause is a simple string error (like error.New) with an ErrorNode (PC).
|
||||||
|
type pcErrorNoCause struct {
|
||||||
|
ErrorNodeNoCause
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error satisfies the error interface. It shows the error with Program Counter symbols.
|
||||||
|
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
|
||||||
|
|
|
@ -5,5 +5,5 @@ const (
|
||||||
UserAgent = "azure-pipeline-go/" + Version
|
UserAgent = "azure-pipeline-go/" + Version
|
||||||
|
|
||||||
// Version is the semantic version (see http://semver.org) of the pipeline package.
|
// Version is the semantic version (see http://semver.org) of the pipeline package.
|
||||||
Version = "0.1.0"
|
Version = "0.2.1"
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
const serviceLibVersion = "0.1"
|
|
122
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go
generated
vendored
122
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go
generated
vendored
|
@ -1,122 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
const CountToEnd = 0
|
|
||||||
|
|
||||||
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
|
|
||||||
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
|
|
||||||
|
|
||||||
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
|
|
||||||
// that should be used to make an HTTP GET request.
|
|
||||||
type HTTPGetterInfo struct {
|
|
||||||
// Offset specifies the start offset that should be used when
|
|
||||||
// creating the HTTP GET request's Range header
|
|
||||||
Offset int64
|
|
||||||
|
|
||||||
// Count specifies the count of bytes that should be used to calculate
|
|
||||||
// the end offset when creating the HTTP GET request's Range header
|
|
||||||
Count int64
|
|
||||||
|
|
||||||
// ETag specifies the resource's etag that should be used when creating
|
|
||||||
// the HTTP GET request's If-Match header
|
|
||||||
ETag ETag
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetryReaderOptions contains properties which can help to decide when to do retry.
|
|
||||||
type RetryReaderOptions struct {
|
|
||||||
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
|
|
||||||
// while reading from a RetryReader. A value of zero means that no additional HTTP
|
|
||||||
// GET requests will be made.
|
|
||||||
MaxRetryRequests int
|
|
||||||
doInjectError bool
|
|
||||||
doInjectErrorRound int
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryReader implements io.ReaderCloser methods.
|
|
||||||
// retryReader tries to read from response, and if there is retriable network error
|
|
||||||
// returned during reading, it will retry according to retry reader option through executing
|
|
||||||
// user defined action with provided data to get a new response, and continue the overall reading process
|
|
||||||
// through reading from the new response.
|
|
||||||
type retryReader struct {
|
|
||||||
ctx context.Context
|
|
||||||
response *http.Response
|
|
||||||
info HTTPGetterInfo
|
|
||||||
countWasBounded bool
|
|
||||||
o RetryReaderOptions
|
|
||||||
getter HTTPGetter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRetryReader creates a retry reader.
|
|
||||||
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
|
|
||||||
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
|
|
||||||
if getter == nil {
|
|
||||||
panic("getter must not be nil")
|
|
||||||
}
|
|
||||||
if info.Count < 0 {
|
|
||||||
panic("info.Count must be >= 0")
|
|
||||||
}
|
|
||||||
if o.MaxRetryRequests < 0 {
|
|
||||||
panic("o.MaxRetryRequests must be >= 0")
|
|
||||||
}
|
|
||||||
return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *retryReader) Read(p []byte) (n int, err error) {
|
|
||||||
for try := 0; ; try++ {
|
|
||||||
//fmt.Println(try) // Comment out for debugging.
|
|
||||||
if s.countWasBounded && s.info.Count == CountToEnd {
|
|
||||||
// User specified an original count and the remaining bytes are 0, return 0, EOF
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.response == nil { // We don't have a response stream to read from, try to get one.
|
|
||||||
response, err := s.getter(s.ctx, s.info)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
// Successful GET; this is the network stream we'll read from.
|
|
||||||
s.response = response
|
|
||||||
}
|
|
||||||
n, err := s.response.Body.Read(p) // Read from the stream
|
|
||||||
|
|
||||||
// Injection mechanism for testing.
|
|
||||||
if s.o.doInjectError && try == s.o.doInjectErrorRound {
|
|
||||||
err = &net.DNSError{IsTemporary: true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We successfully read data or end EOF.
|
|
||||||
if err == nil || err == io.EOF {
|
|
||||||
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
|
|
||||||
if s.info.Count != CountToEnd {
|
|
||||||
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
|
|
||||||
}
|
|
||||||
return n, err // Return the return to the caller
|
|
||||||
}
|
|
||||||
s.Close() // Error, close stream
|
|
||||||
s.response = nil // Our stream is no longer good
|
|
||||||
|
|
||||||
// Check the retry count and error code, and decide whether to retry.
|
|
||||||
if try >= s.o.MaxRetryRequests {
|
|
||||||
return n, err // All retries exhausted
|
|
||||||
}
|
|
||||||
|
|
||||||
if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) {
|
|
||||||
continue
|
|
||||||
// Loop around and try to get and read from new stream.
|
|
||||||
}
|
|
||||||
return n, err // Not retryable, just return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *retryReader) Close() error {
|
|
||||||
if s.response != nil && s.response.Body != nil {
|
|
||||||
return s.response.Body.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -4,8 +4,8 @@ import (
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set.
|
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
|
||||||
type HTTPAccessConditions struct {
|
type ModifiedAccessConditions struct {
|
||||||
IfModifiedSince time.Time
|
IfModifiedSince time.Time
|
||||||
IfUnmodifiedSince time.Time
|
IfUnmodifiedSince time.Time
|
||||||
IfMatch ETag
|
IfMatch ETag
|
||||||
|
@ -13,7 +13,7 @@ type HTTPAccessConditions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||||
func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
|
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
|
||||||
if !ac.IfModifiedSince.IsZero() {
|
if !ac.IfModifiedSince.IsZero() {
|
||||||
ims = &ac.IfModifiedSince
|
ims = &ac.IfModifiedSince
|
||||||
}
|
}
|
||||||
|
@ -31,16 +31,14 @@ func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *
|
||||||
|
|
||||||
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
|
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
|
||||||
type ContainerAccessConditions struct {
|
type ContainerAccessConditions struct {
|
||||||
HTTPAccessConditions
|
ModifiedAccessConditions
|
||||||
LeaseAccessConditions
|
LeaseAccessConditions
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
|
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
|
||||||
type BlobAccessConditions struct {
|
type BlobAccessConditions struct {
|
||||||
HTTPAccessConditions
|
ModifiedAccessConditions
|
||||||
LeaseAccessConditions
|
LeaseAccessConditions
|
||||||
AppendBlobAccessConditions
|
|
||||||
PageBlobAccessConditions
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
|
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
|
|
@ -5,13 +5,12 @@ import "sync/atomic"
|
||||||
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
|
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
|
||||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||||
type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
|
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
|
||||||
|
|
||||||
|
const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
|
||||||
|
|
||||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||||
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
|
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
|
||||||
if target == nil || morpher == nil {
|
|
||||||
panic("target and morpher mut not be nil")
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
currentVal := atomic.LoadInt32(target)
|
currentVal := atomic.LoadInt32(target)
|
||||||
desiredVal, morphResult := morpher(currentVal)
|
desiredVal, morphResult := morpher(currentVal)
|
||||||
|
@ -24,13 +23,10 @@ func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
|
||||||
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
|
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
|
||||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||||
type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
|
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
|
||||||
|
|
||||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||||
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} {
|
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
|
||||||
if target == nil || morpher == nil {
|
|
||||||
panic("target and morpher mut not be nil")
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
currentVal := atomic.LoadUint32(target)
|
currentVal := atomic.LoadUint32(target)
|
||||||
desiredVal, morphResult := morpher(currentVal)
|
desiredVal, morphResult := morpher(currentVal)
|
||||||
|
@ -43,13 +39,10 @@ func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{}
|
||||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||||
type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
|
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
|
||||||
|
|
||||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||||
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
|
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
|
||||||
if target == nil || morpher == nil {
|
|
||||||
panic("target and morpher mut not be nil")
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
currentVal := atomic.LoadInt64(target)
|
currentVal := atomic.LoadInt64(target)
|
||||||
desiredVal, morphResult := morpher(currentVal)
|
desiredVal, morphResult := morpher(currentVal)
|
||||||
|
@ -62,13 +55,10 @@ func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
|
||||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||||
type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
|
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
|
||||||
|
|
||||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||||
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} {
|
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
|
||||||
if target == nil || morpher == nil {
|
|
||||||
panic("target and morpher mut not be nil")
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
currentVal := atomic.LoadUint64(target)
|
currentVal := atomic.LoadUint64(target)
|
||||||
desiredVal, morphResult := morpher(currentVal)
|
desiredVal, morphResult := morpher(currentVal)
|
File diff suppressed because it is too large
Load Diff
|
@ -3,7 +3,6 @@ package azblob
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
@ -12,10 +11,12 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"errors"
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CommonResponseHeaders returns the headers common to all blob REST API responses.
|
// CommonResponse returns the headers common to all blob REST API responses.
|
||||||
type CommonResponse interface {
|
type CommonResponse interface {
|
||||||
// ETag returns the value for header ETag.
|
// ETag returns the value for header ETag.
|
||||||
ETag() ETag
|
ETag() ETag
|
||||||
|
@ -42,6 +43,7 @@ type UploadToBlockBlobOptions struct {
|
||||||
BlockSize int64
|
BlockSize int64
|
||||||
|
|
||||||
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
|
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
|
||||||
|
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
|
||||||
Progress pipeline.ProgressReceiver
|
Progress pipeline.ProgressReceiver
|
||||||
|
|
||||||
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
|
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
|
||||||
|
@ -60,17 +62,25 @@ type UploadToBlockBlobOptions struct {
|
||||||
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
|
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
|
||||||
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
||||||
|
bufferSize := int64(len(b))
|
||||||
// Validate parameters and set defaults
|
|
||||||
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
|
|
||||||
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
|
|
||||||
}
|
|
||||||
if o.BlockSize == 0 {
|
if o.BlockSize == 0 {
|
||||||
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
|
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
|
||||||
|
if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
|
||||||
|
return nil, errors.New("Buffer is too large to upload to a block blob")
|
||||||
|
}
|
||||||
|
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
|
||||||
|
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||||
|
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
|
||||||
|
} else {
|
||||||
|
o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
|
||||||
|
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
|
||||||
|
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||||
|
}
|
||||||
|
// StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize).
|
||||||
|
}
|
||||||
}
|
}
|
||||||
size := int64(len(b))
|
|
||||||
|
|
||||||
if size <= BlockBlobMaxUploadBlobBytes {
|
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||||
// If the size can fit in 1 Upload call, do it this way
|
// If the size can fit in 1 Upload call, do it this way
|
||||||
var body io.ReadSeeker = bytes.NewReader(b)
|
var body io.ReadSeeker = bytes.NewReader(b)
|
||||||
if o.Progress != nil {
|
if o.Progress != nil {
|
||||||
|
@ -79,10 +89,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||||
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
||||||
}
|
}
|
||||||
|
|
||||||
var numBlocks = uint16(((size - 1) / o.BlockSize) + 1)
|
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
|
||||||
if numBlocks > BlockBlobMaxBlocks {
|
|
||||||
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
|
|
||||||
}
|
|
||||||
|
|
||||||
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
|
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
|
||||||
progress := int64(0)
|
progress := int64(0)
|
||||||
|
@ -90,7 +97,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||||
|
|
||||||
err := doBatchTransfer(ctx, batchTransferOptions{
|
err := doBatchTransfer(ctx, batchTransferOptions{
|
||||||
operationName: "UploadBufferToBlockBlob",
|
operationName: "UploadBufferToBlockBlob",
|
||||||
transferSize: size,
|
transferSize: bufferSize,
|
||||||
chunkSize: o.BlockSize,
|
chunkSize: o.BlockSize,
|
||||||
parallelism: o.Parallelism,
|
parallelism: o.Parallelism,
|
||||||
operation: func(offset int64, count int64) error {
|
operation: func(offset int64, count int64) error {
|
||||||
|
@ -115,7 +122,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||||
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
|
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
|
||||||
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
|
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
|
||||||
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
|
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
|
||||||
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions)
|
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
|
||||||
return err
|
return err
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
@ -147,10 +154,9 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File,
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
|
||||||
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
|
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
|
||||||
|
|
||||||
// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions.
|
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
|
||||||
type DownloadFromBlobOptions struct {
|
type DownloadFromBlobOptions struct {
|
||||||
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
|
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
|
||||||
BlockSize int64
|
BlockSize int64
|
||||||
|
@ -168,32 +174,19 @@ type DownloadFromBlobOptions struct {
|
||||||
RetryReaderOptionsPerBlock RetryReaderOptions
|
RetryReaderOptionsPerBlock RetryReaderOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
|
// downloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
|
||||||
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||||
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions,
|
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
|
||||||
initialDownloadResponse *DownloadResponse) error {
|
|
||||||
// Validate parameters, and set defaults.
|
|
||||||
if o.BlockSize < 0 {
|
|
||||||
panic("BlockSize option must be >= 0")
|
|
||||||
}
|
|
||||||
if o.BlockSize == 0 {
|
if o.BlockSize == 0 {
|
||||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if offset < 0 {
|
|
||||||
panic("offset option must be >= 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
if count < 0 {
|
|
||||||
panic("count option must be >= 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
if count == CountToEnd { // If size not specified, calculate it
|
if count == CountToEnd { // If size not specified, calculate it
|
||||||
if initialDownloadResponse != nil {
|
if initialDownloadResponse != nil {
|
||||||
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
|
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
|
||||||
} else {
|
} else {
|
||||||
// If we don't have the length at all, get it
|
// If we don't have the length at all, get it
|
||||||
dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false)
|
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -201,10 +194,6 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if int64(len(b)) < count {
|
|
||||||
panic(fmt.Errorf("the buffer's size should be equal to or larger than the request count of bytes: %d", count))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare and do parallel download.
|
// Prepare and do parallel download.
|
||||||
progress := int64(0)
|
progress := int64(0)
|
||||||
progressLock := &sync.Mutex{}
|
progressLock := &sync.Mutex{}
|
||||||
|
@ -215,7 +204,10 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
|
||||||
chunkSize: o.BlockSize,
|
chunkSize: o.BlockSize,
|
||||||
parallelism: o.Parallelism,
|
parallelism: o.Parallelism,
|
||||||
operation: func(chunkStart int64, count int64) error {
|
operation: func(chunkStart int64, count int64) error {
|
||||||
dr, err := blobURL.Download(ctx, chunkStart+ offset, count, ac, false)
|
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
body := dr.Body(o.RetryReaderOptionsPerBlock)
|
body := dr.Body(o.RetryReaderOptionsPerBlock)
|
||||||
if o.Progress != nil {
|
if o.Progress != nil {
|
||||||
rangeProgress := int64(0)
|
rangeProgress := int64(0)
|
||||||
|
@ -241,29 +233,24 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DownloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
|
// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
|
||||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||||
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||||
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions) error {
|
b []byte, o DownloadFromBlobOptions) error {
|
||||||
return downloadBlobToBuffer(ctx, blobURL, offset, count, ac, b, o, nil)
|
return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DownloadBlobToFile downloads an Azure file to a local file.
|
// DownloadBlobToFile downloads an Azure blob to a local file.
|
||||||
// The file would be truncated if the size doesn't match.
|
// The file would be truncated if the size doesn't match.
|
||||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||||
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||||
ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error {
|
file *os.File, o DownloadFromBlobOptions) error {
|
||||||
// 1. Validate parameters.
|
// 1. Calculate the size of the destination file
|
||||||
if file == nil {
|
|
||||||
panic("file must not be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Calculate the size of the destination file
|
|
||||||
var size int64
|
var size int64
|
||||||
|
|
||||||
if count == CountToEnd {
|
if count == CountToEnd {
|
||||||
// Try to get Azure file's size
|
// Try to get Azure blob's size
|
||||||
props, err := blobURL.GetProperties(ctx, ac)
|
props, err := blobURL.GetProperties(ctx, o.AccessConditions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -272,7 +259,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
|
||||||
size = count
|
size = count
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Compare and try to resize local file's size if it doesn't match Azure file's size.
|
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
|
||||||
stat, err := file.Stat()
|
stat, err := file.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -284,19 +271,18 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
|
||||||
}
|
}
|
||||||
|
|
||||||
if size > 0 {
|
if size > 0 {
|
||||||
// 4. Set mmap and call DownloadAzureFileToBuffer.
|
// 3. Set mmap and call downloadBlobToBuffer.
|
||||||
m, err := newMMF(file, true, 0, int(size))
|
m, err := newMMF(file, true, 0, int(size))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer m.unmap()
|
defer m.unmap()
|
||||||
return downloadBlobToBuffer(ctx, blobURL, offset, size, ac, m, o, nil)
|
return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
|
||||||
} else { // if the blob's size is 0, there is no need in downloading it
|
} else { // if the blob's size is 0, there is no need in downloading it
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
// BatchTransferOptions identifies options used by doBatchTransfer.
|
// BatchTransferOptions identifies options used by doBatchTransfer.
|
||||||
|
@ -374,7 +360,10 @@ func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL
|
||||||
result, err := uploadStream(ctx, reader,
|
result, err := uploadStream(ctx, reader,
|
||||||
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
|
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
|
||||||
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
|
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
|
||||||
return result.(CommonResponse), err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result.(CommonResponse), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type uploadStreamToBlockBlobOptions struct {
|
type uploadStreamToBlockBlobOptions struct {
|
||||||
|
@ -390,13 +379,17 @@ func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
|
func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
|
||||||
if num == 0 && len(buffer) < t.o.BufferSize {
|
if num == 0 {
|
||||||
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
|
|
||||||
t.firstBlock = buffer
|
t.firstBlock = buffer
|
||||||
|
|
||||||
|
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
|
||||||
|
// If the payload is exactly the same size as the buffer, there may be more content coming in.
|
||||||
|
if len(buffer) < t.o.BufferSize {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
// Else, upload a staged block...
|
// Else, upload a staged block...
|
||||||
AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
|
atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
|
||||||
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
|
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
|
||||||
if startVal < num {
|
if startVal < num {
|
||||||
return num, nil
|
return num, nil
|
||||||
|
@ -404,12 +397,14 @@ func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32,
|
||||||
return startVal, nil
|
return startVal, nil
|
||||||
})
|
})
|
||||||
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
|
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
|
||||||
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{})
|
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
|
func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
|
||||||
if t.maxBlockNum == 0 {
|
// If the first block had the exact same size as the buffer
|
||||||
|
// we would have staged it as a block thinking that there might be more data coming
|
||||||
|
if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
|
||||||
// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
|
// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
|
||||||
return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
|
return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
|
||||||
t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
||||||
|
@ -436,7 +431,28 @@ type UploadStreamOptions struct {
|
||||||
BufferSize int
|
BufferSize int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type firstErr struct {
|
||||||
|
lock sync.Mutex
|
||||||
|
finalError error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fe *firstErr) set(err error) {
|
||||||
|
fe.lock.Lock()
|
||||||
|
if fe.finalError == nil {
|
||||||
|
fe.finalError = err
|
||||||
|
}
|
||||||
|
fe.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fe *firstErr) get() (err error) {
|
||||||
|
fe.lock.Lock()
|
||||||
|
err = fe.finalError
|
||||||
|
fe.lock.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
|
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
|
||||||
|
firstErr := firstErr{}
|
||||||
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
|
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
|
||||||
defer cancel()
|
defer cancel()
|
||||||
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
|
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
|
||||||
|
@ -463,9 +479,12 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
|
||||||
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
|
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
|
||||||
wg.Done() // Indicate this buffer was sent
|
wg.Done() // Indicate this buffer was sent
|
||||||
if nil != err {
|
if nil != err {
|
||||||
|
// NOTE: finalErr could be assigned to multiple times here which is OK,
|
||||||
|
// some error will be returned.
|
||||||
|
firstErr.set(err)
|
||||||
cancel()
|
cancel()
|
||||||
}
|
}
|
||||||
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can use reuse this buffer now
|
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
@ -490,7 +509,7 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
|
||||||
buffer = <-incoming
|
buffer = <-incoming
|
||||||
}
|
}
|
||||||
n, err := io.ReadFull(reader, buffer)
|
n, err := io.ReadFull(reader, buffer)
|
||||||
if err != nil {
|
if err != nil { // Less than len(buffer) bytes were read
|
||||||
buffer = buffer[:n] // Make slice match the # of read bytes
|
buffer = buffer[:n] // Make slice match the # of read bytes
|
||||||
}
|
}
|
||||||
if len(buffer) > 0 {
|
if len(buffer) > 0 {
|
||||||
|
@ -499,12 +518,21 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
|
||||||
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
|
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
|
||||||
}
|
}
|
||||||
if err != nil { // The reader is done, no more outgoing buffers
|
if err != nil { // The reader is done, no more outgoing buffers
|
||||||
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
|
err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
|
||||||
|
} else {
|
||||||
|
firstErr.set(err)
|
||||||
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
|
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
|
||||||
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
|
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
|
||||||
wg.Wait() // Wait for all pending outgoing messages to complete
|
wg.Wait() // Wait for all pending outgoing messages to complete
|
||||||
// After all blocks uploaded, commit them to the blob & return the result
|
err := firstErr.get()
|
||||||
|
if err == nil {
|
||||||
|
// If no error, after all blocks uploaded, commit them to the blob & return the result
|
||||||
return t.end(ctx)
|
return t.end(ctx)
|
||||||
}
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
|
@ -1,6 +1,7 @@
|
||||||
package azblob
|
package azblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
@ -15,7 +16,8 @@ const (
|
||||||
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
|
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
|
||||||
type BlobURLParts struct {
|
type BlobURLParts struct {
|
||||||
Scheme string // Ex: "https://"
|
Scheme string // Ex: "https://"
|
||||||
Host string // Ex: "account.blob.core.windows.net"
|
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
|
||||||
|
IPEndpointStyleInfo IPEndpointStyleInfo
|
||||||
ContainerName string // "" if no container
|
ContainerName string // "" if no container
|
||||||
BlobName string // "" if no blob
|
BlobName string // "" if no blob
|
||||||
Snapshot string // "" if not a snapshot
|
Snapshot string // "" if not a snapshot
|
||||||
|
@ -23,6 +25,31 @@ type BlobURLParts struct {
|
||||||
UnparsedParams string
|
UnparsedParams string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
|
||||||
|
// Ex: "https://10.132.141.33/accountname/containername"
|
||||||
|
type IPEndpointStyleInfo struct {
|
||||||
|
AccountName string // "" if not using IP endpoint style
|
||||||
|
}
|
||||||
|
|
||||||
|
// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as:
|
||||||
|
// http(s)://IP(:port)/storageaccount/container/...
|
||||||
|
// As url's Host property, host could be both host or host:port
|
||||||
|
func isIPEndpointStyle(host string) bool {
|
||||||
|
if host == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if h, _, err := net.SplitHostPort(host); err == nil {
|
||||||
|
host = h
|
||||||
|
}
|
||||||
|
// For IPv6, there could be case where SplitHostPort fails for cannot finding port.
|
||||||
|
// In this case, eliminate the '[' and ']' in the URL.
|
||||||
|
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
|
||||||
|
if host[0] == '[' && host[len(host)-1] == ']' {
|
||||||
|
host = host[1 : len(host)-1]
|
||||||
|
}
|
||||||
|
return net.ParseIP(host) != nil
|
||||||
|
}
|
||||||
|
|
||||||
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
|
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
|
||||||
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
|
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
|
||||||
func NewBlobURLParts(u url.URL) BlobURLParts {
|
func NewBlobURLParts(u url.URL) BlobURLParts {
|
||||||
|
@ -37,9 +64,16 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
|
||||||
if path[0] == '/' {
|
if path[0] == '/' {
|
||||||
path = path[1:] // If path starts with a slash, remove it
|
path = path[1:] // If path starts with a slash, remove it
|
||||||
}
|
}
|
||||||
|
if isIPEndpointStyle(up.Host) {
|
||||||
|
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
|
||||||
|
up.IPEndpointStyleInfo.AccountName = path
|
||||||
|
} else {
|
||||||
|
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
|
||||||
|
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Find the next slash (if it exists)
|
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
|
||||||
containerEndIndex := strings.Index(path, "/")
|
|
||||||
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
|
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
|
||||||
up.ContainerName = path
|
up.ContainerName = path
|
||||||
} else {
|
} else {
|
||||||
|
@ -77,6 +111,9 @@ func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
|
||||||
// field contains the SAS, snapshot, and unparsed query parameters.
|
// field contains the SAS, snapshot, and unparsed query parameters.
|
||||||
func (up BlobURLParts) URL() url.URL {
|
func (up BlobURLParts) URL() url.URL {
|
||||||
path := ""
|
path := ""
|
||||||
|
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
|
||||||
|
path += "/" + up.IPEndpointStyleInfo.AccountName
|
||||||
|
}
|
||||||
// Concatenate container & blob names (if they exist)
|
// Concatenate container & blob names (if they exist)
|
||||||
if up.ContainerName != "" {
|
if up.ContainerName != "" {
|
||||||
path += "/" + up.ContainerName
|
path += "/" + up.ContainerName
|
||||||
|
@ -87,6 +124,11 @@ func (up BlobURLParts) URL() url.URL {
|
||||||
|
|
||||||
rawQuery := up.UnparsedParams
|
rawQuery := up.UnparsedParams
|
||||||
|
|
||||||
|
//If no snapshot is initially provided, fill it in from the SAS query properties to help the user
|
||||||
|
if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
|
||||||
|
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
|
||||||
|
}
|
||||||
|
|
||||||
// Concatenate blob snapshot query parameter (if it exists)
|
// Concatenate blob snapshot query parameter (if it exists)
|
||||||
if up.Snapshot != "" {
|
if up.Snapshot != "" {
|
||||||
if len(rawQuery) > 0 {
|
if len(rawQuery) > 0 {
|
|
@ -8,11 +8,13 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
|
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
|
||||||
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
|
||||||
type BlobSASSignatureValues struct {
|
type BlobSASSignatureValues struct {
|
||||||
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
||||||
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
||||||
StartTime time.Time `param:"st"` // Not specified if IsZero
|
StartTime time.Time `param:"st"` // Not specified if IsZero
|
||||||
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
||||||
|
SnapshotTime time.Time
|
||||||
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
|
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
|
||||||
IPRange IPRange `param:"sip"`
|
IPRange IPRange `param:"sip"`
|
||||||
Identifier string `param:"si"`
|
Identifier string `param:"si"`
|
||||||
|
@ -25,19 +27,28 @@ type BlobSASSignatureValues struct {
|
||||||
ContentType string // rsct
|
ContentType string // rsct
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce
|
||||||
// the proper SAS query parameters.
|
// the proper SAS query parameters.
|
||||||
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
|
// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
|
||||||
if sharedKeyCredential == nil {
|
func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
|
||||||
panic("sharedKeyCredential can't be nil")
|
resource := "c"
|
||||||
|
if credential == nil {
|
||||||
|
return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
|
||||||
}
|
}
|
||||||
|
|
||||||
resource := "c"
|
if !v.SnapshotTime.IsZero() {
|
||||||
if v.BlobName == "" {
|
resource = "bs"
|
||||||
|
//Make sure the permission characters are in the correct order
|
||||||
|
perms := &BlobSASPermissions{}
|
||||||
|
if err := perms.Parse(v.Permissions); err != nil {
|
||||||
|
return SASQueryParameters{}, err
|
||||||
|
}
|
||||||
|
v.Permissions = perms.String()
|
||||||
|
} else if v.BlobName == "" {
|
||||||
// Make sure the permission characters are in the correct order
|
// Make sure the permission characters are in the correct order
|
||||||
perms := &ContainerSASPermissions{}
|
perms := &ContainerSASPermissions{}
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
if err := perms.Parse(v.Permissions); err != nil {
|
||||||
panic(err)
|
return SASQueryParameters{}, err
|
||||||
}
|
}
|
||||||
v.Permissions = perms.String()
|
v.Permissions = perms.String()
|
||||||
} else {
|
} else {
|
||||||
|
@ -45,32 +56,54 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
|
||||||
// Make sure the permission characters are in the correct order
|
// Make sure the permission characters are in the correct order
|
||||||
perms := &BlobSASPermissions{}
|
perms := &BlobSASPermissions{}
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
if err := perms.Parse(v.Permissions); err != nil {
|
||||||
panic(err)
|
return SASQueryParameters{}, err
|
||||||
}
|
}
|
||||||
v.Permissions = perms.String()
|
v.Permissions = perms.String()
|
||||||
}
|
}
|
||||||
if v.Version == "" {
|
if v.Version == "" {
|
||||||
v.Version = SASVersion
|
v.Version = SASVersion
|
||||||
}
|
}
|
||||||
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
|
startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
|
||||||
|
|
||||||
|
signedIdentifier := v.Identifier
|
||||||
|
|
||||||
|
udk := credential.getUDKParams()
|
||||||
|
|
||||||
|
if udk != nil {
|
||||||
|
udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
|
||||||
|
//I don't like this answer to combining the functions
|
||||||
|
//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
|
||||||
|
signedIdentifier = strings.Join([]string{
|
||||||
|
udk.SignedOid,
|
||||||
|
udk.SignedTid,
|
||||||
|
udkStart,
|
||||||
|
udkExpiry,
|
||||||
|
udk.SignedService,
|
||||||
|
udk.SignedVersion,
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||||
stringToSign := strings.Join([]string{
|
stringToSign := strings.Join([]string{
|
||||||
v.Permissions,
|
v.Permissions,
|
||||||
startTime,
|
startTime,
|
||||||
expiryTime,
|
expiryTime,
|
||||||
getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName),
|
getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName),
|
||||||
v.Identifier,
|
signedIdentifier,
|
||||||
v.IPRange.String(),
|
v.IPRange.String(),
|
||||||
string(v.Protocol),
|
string(v.Protocol),
|
||||||
v.Version,
|
v.Version,
|
||||||
|
resource,
|
||||||
|
snapshotTime, // signed timestamp
|
||||||
v.CacheControl, // rscc
|
v.CacheControl, // rscc
|
||||||
v.ContentDisposition, // rscd
|
v.ContentDisposition, // rscd
|
||||||
v.ContentEncoding, // rsce
|
v.ContentEncoding, // rsce
|
||||||
v.ContentLanguage, // rscl
|
v.ContentLanguage, // rscl
|
||||||
v.ContentType}, // rsct
|
v.ContentType}, // rsct
|
||||||
"\n")
|
"\n")
|
||||||
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
|
|
||||||
|
signature := ""
|
||||||
|
signature = credential.ComputeHMACSHA256(stringToSign)
|
||||||
|
|
||||||
p := SASQueryParameters{
|
p := SASQueryParameters{
|
||||||
// Common SAS parameters
|
// Common SAS parameters
|
||||||
|
@ -84,11 +117,28 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
|
||||||
// Container/Blob-specific SAS parameters
|
// Container/Blob-specific SAS parameters
|
||||||
resource: resource,
|
resource: resource,
|
||||||
identifier: v.Identifier,
|
identifier: v.Identifier,
|
||||||
|
cacheControl: v.CacheControl,
|
||||||
|
contentDisposition: v.ContentDisposition,
|
||||||
|
contentEncoding: v.ContentEncoding,
|
||||||
|
contentLanguage: v.ContentLanguage,
|
||||||
|
contentType: v.ContentType,
|
||||||
|
snapshotTime: v.SnapshotTime,
|
||||||
|
|
||||||
// Calculated SAS signature
|
// Calculated SAS signature
|
||||||
signature: signature,
|
signature: signature,
|
||||||
}
|
}
|
||||||
return p
|
|
||||||
|
//User delegation SAS specific parameters
|
||||||
|
if udk != nil {
|
||||||
|
p.signedOid = udk.SignedOid
|
||||||
|
p.signedTid = udk.SignedTid
|
||||||
|
p.signedStart = udk.SignedStart
|
||||||
|
p.signedExpiry = udk.SignedExpiry
|
||||||
|
p.signedService = udk.SignedService
|
||||||
|
p.signedVersion = udk.SignedVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
|
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
|
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
generated
vendored
Normal file
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
package azblob
|
||||||
|
|
||||||
|
// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
|
||||||
|
type StorageAccountCredential interface {
|
||||||
|
AccountName() string
|
||||||
|
ComputeHMACSHA256(message string) (base64String string)
|
||||||
|
getUDKParams() *UserDelegationKey
|
||||||
|
}
|
|
@ -45,7 +45,7 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
|
||||||
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
|
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||||
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
|
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
||||||
return ab.abClient.Create(ctx, 0, nil,
|
return ab.abClient.Create(ctx, 0, nil,
|
||||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||||
|
@ -56,17 +56,39 @@ func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata
|
||||||
// This method panics if the stream is not at position 0.
|
// This method panics if the stream is not at position 0.
|
||||||
// Note that the http client closes the body stream after the request is sent to the service.
|
// Note that the http client closes the body stream after the request is sent to the service.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
|
||||||
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobAppendBlockResponse, error) {
|
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers()
|
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
|
||||||
return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
|
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||||
ac.LeaseAccessConditions.pointers(),
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ab.abClient.AppendBlock(ctx, body, count, nil,
|
||||||
|
transactionalMD5, ac.LeaseAccessConditions.pointers(),
|
||||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AppendBlobAccessConditions identifies append blob-specific access conditions which you optionally set.
|
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
|
||||||
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
|
||||||
|
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||||
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
|
||||||
|
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||||
|
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
|
||||||
|
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
|
||||||
|
transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||||
|
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||||
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||||
|
}
|
||||||
|
|
||||||
type AppendBlobAccessConditions struct {
|
type AppendBlobAccessConditions struct {
|
||||||
|
ModifiedAccessConditions
|
||||||
|
LeaseAccessConditions
|
||||||
|
AppendPositionAccessConditions
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
|
||||||
|
type AppendPositionAccessConditions struct {
|
||||||
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
|
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
|
||||||
// only if the append position is equal to a value.
|
// only if the append position is equal to a value.
|
||||||
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
|
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
|
||||||
|
@ -83,13 +105,7 @@ type AppendBlobAccessConditions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||||
func (ac AppendBlobAccessConditions) pointers() (iape *int64, imsltoe *int64) {
|
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
|
||||||
if ac.IfAppendPositionEqual < -1 {
|
|
||||||
panic("IfAppendPositionEqual can't be less than -1")
|
|
||||||
}
|
|
||||||
if ac.IfMaxSizeLessThanOrEqual < -1 {
|
|
||||||
panic("IfMaxSizeLessThanOrEqual can't be less than -1")
|
|
||||||
}
|
|
||||||
var zero int64 // defaults to 0
|
var zero int64 // defaults to 0
|
||||||
switch ac.IfAppendPositionEqual {
|
switch ac.IfAppendPositionEqual {
|
||||||
case -1:
|
case -1:
|
|
@ -14,9 +14,6 @@ type BlobURL struct {
|
||||||
|
|
||||||
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
|
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
|
||||||
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
|
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
blobClient := newBlobClient(url, p)
|
blobClient := newBlobClient(url, p)
|
||||||
return BlobURL{blobClient: blobClient}
|
return BlobURL{blobClient: blobClient}
|
||||||
}
|
}
|
||||||
|
@ -34,9 +31,6 @@ func (b BlobURL) String() string {
|
||||||
|
|
||||||
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
||||||
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
|
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
return NewBlobURL(b.blobClient.URL(), p)
|
return NewBlobURL(b.blobClient.URL(), p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -64,13 +58,14 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
|
// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
|
||||||
|
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
|
||||||
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
|
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
|
||||||
var xRangeGetContentMD5 *bool
|
var xRangeGetContentMD5 *bool
|
||||||
if rangeGetContentMD5 {
|
if rangeGetContentMD5 {
|
||||||
xRangeGetContentMD5 = &rangeGetContentMD5
|
xRangeGetContentMD5 = &rangeGetContentMD5
|
||||||
}
|
}
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
dr, err := b.blobClient.Download(ctx, nil, nil,
|
dr, err := b.blobClient.Download(ctx, nil, nil,
|
||||||
httpRange{offset: offset, count: count}.pointers(),
|
httpRange{offset: offset, count: count}.pointers(),
|
||||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
|
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
|
||||||
|
@ -90,7 +85,7 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
|
||||||
// Note that deleting a blob also deletes all its snapshots.
|
// Note that deleting a blob also deletes all its snapshots.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
|
||||||
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
|
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
|
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
@ -107,14 +102,14 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
|
||||||
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
|
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
|
||||||
// does not update the blob's ETag.
|
// does not update the blob's ETag.
|
||||||
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
|
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
|
||||||
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTierResponse, error) {
|
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
|
||||||
return b.blobClient.SetTier(ctx, tier, nil, nil)
|
return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlobProperties returns the blob's properties.
|
// GetBlobProperties returns the blob's properties.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
|
||||||
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
|
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
|
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
@ -122,7 +117,7 @@ func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*B
|
||||||
// SetBlobHTTPHeaders changes a blob's HTTP headers.
|
// SetBlobHTTPHeaders changes a blob's HTTP headers.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
||||||
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
|
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return b.blobClient.SetHTTPHeaders(ctx, nil,
|
return b.blobClient.SetHTTPHeaders(ctx, nil,
|
||||||
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
|
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
|
||||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||||
|
@ -132,7 +127,7 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
|
||||||
// SetBlobMetadata changes a blob's metadata.
|
// SetBlobMetadata changes a blob's metadata.
|
||||||
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
|
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
|
||||||
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
|
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
|
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
@ -143,14 +138,14 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
|
||||||
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
|
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
|
||||||
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
|
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
|
||||||
// performance hit.
|
// performance hit.
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
|
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
|
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
|
||||||
// 15 to 60 seconds, or infinite (-1).
|
// 15 to 60 seconds, or infinite (-1).
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||||
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobAcquireLeaseResponse, error) {
|
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||||
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
|
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
|
@ -158,7 +153,7 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i
|
||||||
|
|
||||||
// RenewLease renews the blob's previously-acquired lease.
|
// RenewLease renews the blob's previously-acquired lease.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||||
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobRenewLeaseResponse, error) {
|
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||||
return b.blobClient.RenewLease(ctx, leaseID, nil,
|
return b.blobClient.RenewLease(ctx, leaseID, nil,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
|
@ -166,7 +161,7 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessCo
|
||||||
|
|
||||||
// ReleaseLease releases the blob's previously-acquired lease.
|
// ReleaseLease releases the blob's previously-acquired lease.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||||
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobReleaseLeaseResponse, error) {
|
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||||
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
|
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
|
@ -175,7 +170,7 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccess
|
||||||
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
|
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
|
||||||
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
|
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||||
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobBreakLeaseResponse, error) {
|
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||||
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
|
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
|
@ -183,7 +178,7 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac
|
||||||
|
|
||||||
// ChangeLease changes the blob's lease ID.
|
// ChangeLease changes the blob's lease ID.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||||
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobChangeLeaseResponse, error) {
|
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||||
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
|
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
|
||||||
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
|
@ -201,10 +196,9 @@ func leasePeriodPointer(period int32) (p *int32) {
|
||||||
|
|
||||||
// StartCopyFromURL copies the data at the source URL to a blob.
|
// StartCopyFromURL copies the data at the source URL to a blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
|
||||||
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
|
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
|
||||||
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
|
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
|
||||||
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
|
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
|
||||||
srcLeaseID := srcac.LeaseAccessConditions.pointers()
|
|
||||||
dstLeaseID := dstac.LeaseAccessConditions.pointers()
|
dstLeaseID := dstac.LeaseAccessConditions.pointers()
|
||||||
|
|
||||||
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
|
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
|
||||||
|
@ -212,7 +206,7 @@ func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata
|
||||||
srcIfMatchETag, srcIfNoneMatchETag,
|
srcIfMatchETag, srcIfNoneMatchETag,
|
||||||
dstIfModifiedSince, dstIfUnmodifiedSince,
|
dstIfModifiedSince, dstIfUnmodifiedSince,
|
||||||
dstIfMatchETag, dstIfNoneMatchETag,
|
dstIfMatchETag, dstIfNoneMatchETag,
|
||||||
dstLeaseID, srcLeaseID, nil)
|
dstLeaseID, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
|
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
|
|
@ -12,7 +12,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
|
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
|
||||||
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
|
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
|
||||||
|
|
||||||
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
|
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
|
||||||
|
@ -30,9 +30,6 @@ type BlockBlobURL struct {
|
||||||
|
|
||||||
// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
|
// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
|
||||||
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
|
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
blobClient := newBlobClient(url, p)
|
blobClient := newBlobClient(url, p)
|
||||||
bbClient := newBlockBlobClient(url, p)
|
bbClient := newBlockBlobClient(url, p)
|
||||||
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
|
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
|
||||||
|
@ -59,8 +56,12 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
|
||||||
// Note that the http client closes the body stream after the request is sent to the service.
|
// Note that the http client closes the body stream after the request is sent to the service.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||||
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
|
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
|
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bb.bbClient.Upload(ctx, body, count, nil,
|
||||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
|
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
|
||||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||||
|
@ -70,16 +71,20 @@ func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTT
|
||||||
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
||||||
// Note that the http client closes the body stream after the request is sent to the service.
|
// Note that the http client closes the body stream after the request is sent to the service.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
|
||||||
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobStageBlockResponse, error) {
|
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
|
||||||
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil)
|
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
||||||
// If count is CountToEnd (0), then data is read from specified offset to the end.
|
// If count is CountToEnd (0), then data is read from specified offset to the end.
|
||||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
|
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
|
||||||
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
|
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||||
sourceURLStr := sourceURL.String()
|
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||||
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, &sourceURLStr, httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
|
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
|
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
|
||||||
|
@ -90,7 +95,7 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
|
||||||
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
|
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
|
||||||
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
|
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
|
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
|
||||||
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||||
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
|
@ -3,6 +3,7 @@ package azblob
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
|
||||||
|
@ -16,9 +17,6 @@ type ContainerURL struct {
|
||||||
|
|
||||||
// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
|
// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
|
||||||
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
|
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
client := newContainerClient(url, p)
|
client := newContainerClient(url, p)
|
||||||
return ContainerURL{client: client}
|
return ContainerURL{client: client}
|
||||||
}
|
}
|
||||||
|
@ -89,10 +87,10 @@ func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAcces
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
|
||||||
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
|
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
|
||||||
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||||
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||||
}
|
}
|
||||||
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||||
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
|
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
|
||||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
@ -109,9 +107,9 @@ func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessCondition
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
|
||||||
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
|
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
|
||||||
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||||
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
|
return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
|
||||||
}
|
}
|
||||||
ifModifiedSince, _, _, _ := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||||
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
|
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -181,16 +179,16 @@ func (p *AccessPolicyPermission) Parse(s string) error {
|
||||||
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
|
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
|
||||||
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
|
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
|
||||||
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||||
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||||
}
|
}
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||||
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
|
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
|
||||||
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
|
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
|
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||||
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerAcquireLeaseResponse, error) {
|
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||||
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
|
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
|
||||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
|
@ -198,28 +196,28 @@ func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, durat
|
||||||
|
|
||||||
// RenewLease renews the container's previously-acquired lease.
|
// RenewLease renews the container's previously-acquired lease.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||||
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerRenewLeaseResponse, error) {
|
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||||
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReleaseLease releases the container's previously-acquired lease.
|
// ReleaseLease releases the container's previously-acquired lease.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||||
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerReleaseLeaseResponse, error) {
|
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||||
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BreakLease breaks the container's previously-acquired lease (if it exists).
|
// BreakLease breaks the container's previously-acquired lease (if it exists).
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||||
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainerBreakLeaseResponse, error) {
|
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||||
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
|
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChangeLease changes the container's lease ID.
|
// ChangeLease changes the container's lease ID.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||||
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerChangeLeaseResponse, error) {
|
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||||
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||||
}
|
}
|
||||||
|
@ -231,7 +229,7 @@ func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedI
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
||||||
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
|
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
|
||||||
prefix, include, maxResults := o.pointers()
|
prefix, include, maxResults := o.pointers()
|
||||||
return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
|
return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
||||||
|
@ -241,10 +239,10 @@ func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
||||||
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
|
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
|
||||||
if o.Details.Snapshots {
|
if o.Details.Snapshots {
|
||||||
panic("snapshots are not supported in this listing operation")
|
return nil, errors.New("snapshots are not supported in this listing operation")
|
||||||
}
|
}
|
||||||
prefix, include, maxResults := o.pointers()
|
prefix, include, maxResults := o.pointers()
|
||||||
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
|
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListBlobsSegmentOptions defines options available when calling ListBlobs.
|
// ListBlobsSegmentOptions defines options available when calling ListBlobs.
|
||||||
|
@ -264,9 +262,6 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
|
||||||
}
|
}
|
||||||
include = o.Details.slice()
|
include = o.Details.slice()
|
||||||
if o.MaxResults != 0 {
|
if o.MaxResults != 0 {
|
||||||
if o.MaxResults < 0 {
|
|
||||||
panic("MaxResults must be >= 0")
|
|
||||||
}
|
|
||||||
maxResults = &o.MaxResults
|
maxResults = &o.MaxResults
|
||||||
}
|
}
|
||||||
return
|
return
|
|
@ -26,9 +26,6 @@ type PageBlobURL struct {
|
||||||
|
|
||||||
// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
|
// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
|
||||||
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
|
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
blobClient := newBlobClient(url, p)
|
blobClient := newBlobClient(url, p)
|
||||||
pbClient := newPageBlobClient(url, p)
|
pbClient := newPageBlobClient(url, p)
|
||||||
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
|
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
|
||||||
|
@ -47,39 +44,54 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
|
||||||
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreatePageBlob creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
|
// Create creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||||
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
|
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
|
||||||
if sequenceNumber < 0 {
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
panic("sequenceNumber must be greater than or equal to 0")
|
return pb.pbClient.Create(ctx, 0, size, nil,
|
||||||
}
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
|
||||||
return pb.pbClient.Create(ctx, 0, nil,
|
|
||||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
|
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
|
||||||
metadata, ac.LeaseAccessConditions.pointers(),
|
metadata, ac.LeaseAccessConditions.pointers(),
|
||||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil)
|
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
|
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
|
||||||
// This method panics if the stream is not at position 0.
|
// This method panics if the stream is not at position 0.
|
||||||
// Note that the http client closes the body stream after the request is sent to the service.
|
// Note that the http client closes the body stream after the request is sent to the service.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||||
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobUploadPagesResponse, error) {
|
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
|
||||||
count := validateSeekableStreamAt0AndGetCount(body)
|
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
if err != nil {
|
||||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
|
return nil, err
|
||||||
return pb.pbClient.UploadPages(ctx, body, count, nil,
|
}
|
||||||
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
|
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||||
|
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
|
||||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||||
ac.LeaseAccessConditions.pointers(),
|
ac.LeaseAccessConditions.pointers(),
|
||||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
|
||||||
|
// The sourceOffset specifies the start offset of source data to copy from.
|
||||||
|
// The destOffset specifies the start offset of data in page blob will be written to.
|
||||||
|
// The count must be a multiple of 512 bytes.
|
||||||
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
|
||||||
|
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||||
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
|
||||||
|
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||||
|
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
|
||||||
|
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
|
||||||
|
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||||
|
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||||
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||||
|
}
|
||||||
|
|
||||||
// ClearPages frees the specified pages from the page blob.
|
// ClearPages frees the specified pages from the page blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||||
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobClearPagesResponse, error) {
|
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
|
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||||
return pb.pbClient.ClearPages(ctx, 0, nil,
|
return pb.pbClient.ClearPages(ctx, 0, nil,
|
||||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||||
ac.LeaseAccessConditions.pointers(),
|
ac.LeaseAccessConditions.pointers(),
|
||||||
|
@ -90,7 +102,7 @@ func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64,
|
||||||
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
|
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||||
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
|
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return pb.pbClient.GetPageRanges(ctx, nil, nil,
|
return pb.pbClient.GetPageRanges(ctx, nil, nil,
|
||||||
httpRange{offset: offset, count: count}.pointers(),
|
httpRange{offset: offset, count: count}.pointers(),
|
||||||
ac.LeaseAccessConditions.pointers(),
|
ac.LeaseAccessConditions.pointers(),
|
||||||
|
@ -100,7 +112,7 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
|
||||||
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
|
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||||
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
|
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
|
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
|
||||||
httpRange{offset: offset, count: count}.pointers(),
|
httpRange{offset: offset, count: count}.pointers(),
|
||||||
ac.LeaseAccessConditions.pointers(),
|
ac.LeaseAccessConditions.pointers(),
|
||||||
|
@ -111,10 +123,7 @@ func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count
|
||||||
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
|
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
||||||
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
|
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
|
||||||
if size%PageBlobPageBytes != 0 {
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
panic("Size must be a multiple of PageBlobPageBytes (512)")
|
|
||||||
}
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
|
||||||
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
@ -122,14 +131,11 @@ func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessCondi
|
||||||
// SetSequenceNumber sets the page blob's sequence number.
|
// SetSequenceNumber sets the page blob's sequence number.
|
||||||
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
|
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
|
||||||
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
|
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
|
||||||
if sequenceNumber < 0 {
|
|
||||||
panic("sequenceNumber must be greater than or equal to 0")
|
|
||||||
}
|
|
||||||
sn := &sequenceNumber
|
sn := &sequenceNumber
|
||||||
if action == SequenceNumberActionIncrement {
|
if action == SequenceNumberActionIncrement {
|
||||||
sn = nil
|
sn = nil
|
||||||
}
|
}
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
||||||
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
|
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
|
||||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
||||||
sn, nil)
|
sn, nil)
|
||||||
|
@ -141,37 +147,28 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
|
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
|
||||||
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
|
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
|
||||||
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
|
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||||
qp := source.Query()
|
qp := source.Query()
|
||||||
qp.Set("snapshot", snapshot)
|
qp.Set("snapshot", snapshot)
|
||||||
source.RawQuery = qp.Encode()
|
source.RawQuery = qp.Encode()
|
||||||
return pb.pbClient.CopyIncremental(ctx, source.String(), nil, nil,
|
return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pr PageRange) pointers() *string {
|
func (pr PageRange) pointers() *string {
|
||||||
if pr.Start < 0 {
|
|
||||||
panic("PageRange's Start value must be greater than or equal to 0")
|
|
||||||
}
|
|
||||||
if pr.End <= 0 {
|
|
||||||
panic("PageRange's End value must be greater than 0")
|
|
||||||
}
|
|
||||||
if pr.Start%PageBlobPageBytes != 0 {
|
|
||||||
panic("PageRange's Start value must be a multiple of 512")
|
|
||||||
}
|
|
||||||
if pr.End%PageBlobPageBytes != (PageBlobPageBytes - 1) {
|
|
||||||
panic("PageRange's End value must be 1 less than a multiple of 512")
|
|
||||||
}
|
|
||||||
if pr.End <= pr.Start {
|
|
||||||
panic("PageRange's End value must be after the start")
|
|
||||||
}
|
|
||||||
endOffset := strconv.FormatInt(int64(pr.End), 10)
|
endOffset := strconv.FormatInt(int64(pr.End), 10)
|
||||||
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
|
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
|
||||||
return &asString
|
return &asString
|
||||||
}
|
}
|
||||||
|
|
||||||
// PageBlobAccessConditions identifies page blob-specific access conditions which you optionally set.
|
|
||||||
type PageBlobAccessConditions struct {
|
type PageBlobAccessConditions struct {
|
||||||
|
ModifiedAccessConditions
|
||||||
|
LeaseAccessConditions
|
||||||
|
SequenceNumberAccessConditions
|
||||||
|
}
|
||||||
|
|
||||||
|
// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
|
||||||
|
type SequenceNumberAccessConditions struct {
|
||||||
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
|
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
|
||||||
// only if the blob's sequence number is less than a value.
|
// only if the blob's sequence number is less than a value.
|
||||||
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
|
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
|
||||||
|
@ -195,17 +192,7 @@ type PageBlobAccessConditions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||||
func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
|
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
|
||||||
if ac.IfSequenceNumberLessThan < -1 {
|
|
||||||
panic("Ifsequencenumberlessthan can't be less than -1")
|
|
||||||
}
|
|
||||||
if ac.IfSequenceNumberLessThanOrEqual < -1 {
|
|
||||||
panic("IfSequenceNumberLessThanOrEqual can't be less than -1")
|
|
||||||
}
|
|
||||||
if ac.IfSequenceNumberEqual < -1 {
|
|
||||||
panic("IfSequenceNumberEqual can't be less than -1")
|
|
||||||
}
|
|
||||||
|
|
||||||
var zero int64 // Defaults to 0
|
var zero int64 // Defaults to 0
|
||||||
switch ac.IfSequenceNumberLessThan {
|
switch ac.IfSequenceNumberLessThan {
|
||||||
case -1:
|
case -1:
|
|
@ -23,13 +23,21 @@ type ServiceURL struct {
|
||||||
|
|
||||||
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
|
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
|
||||||
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
|
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
|
||||||
if p == nil {
|
|
||||||
panic("p can't be nil")
|
|
||||||
}
|
|
||||||
client := newServiceClient(primaryURL, p)
|
client := newServiceClient(primaryURL, p)
|
||||||
return ServiceURL{client: client}
|
return ServiceURL{client: client}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
|
||||||
|
//OAuth is required for this call, as well as any role that can delegate access to the storage account.
|
||||||
|
func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) {
|
||||||
|
sc := newServiceClient(s.client.url, s.client.p)
|
||||||
|
udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID)
|
||||||
|
if err != nil {
|
||||||
|
return UserDelegationCredential{}, err
|
||||||
|
}
|
||||||
|
return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
|
||||||
|
}
|
||||||
|
|
||||||
// URL returns the URL endpoint used by the ServiceURL object.
|
// URL returns the URL endpoint used by the ServiceURL object.
|
||||||
func (s ServiceURL) URL() url.URL {
|
func (s ServiceURL) URL() url.URL {
|
||||||
return s.client.URL()
|
return s.client.URL()
|
||||||
|
@ -81,9 +89,9 @@ func appendToURLPath(u url.URL, name string) url.URL {
|
||||||
// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the
|
// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the
|
||||||
// previously-returned Marker) to get the next segment. For more information, see
|
// previously-returned Marker) to get the next segment. For more information, see
|
||||||
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
|
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
|
||||||
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) {
|
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
|
||||||
prefix, include, maxResults := o.pointers()
|
prefix, include, maxResults := o.pointers()
|
||||||
return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
|
return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListContainersOptions defines options available when calling ListContainers.
|
// ListContainersOptions defines options available when calling ListContainers.
|
||||||
|
@ -99,9 +107,6 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListC
|
||||||
prefix = &o.Prefix
|
prefix = &o.Prefix
|
||||||
}
|
}
|
||||||
if o.MaxResults != 0 {
|
if o.MaxResults != 0 {
|
||||||
if o.MaxResults < 0 {
|
|
||||||
panic("MaxResults must be >= 0")
|
|
||||||
}
|
|
||||||
maxResults = &o.MaxResults
|
maxResults = &o.MaxResults
|
||||||
}
|
}
|
||||||
include = ListContainersIncludeType(o.Detail.string())
|
include = ListContainersIncludeType(o.Detail.string())
|
38
vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
generated
vendored
Normal file
38
vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
package azblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/base64"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
|
||||||
|
func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential {
|
||||||
|
return UserDelegationCredential{
|
||||||
|
accountName: accountName,
|
||||||
|
accountKey: key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type UserDelegationCredential struct {
|
||||||
|
accountName string
|
||||||
|
accountKey UserDelegationKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountName returns the Storage account's name
|
||||||
|
func (f UserDelegationCredential) AccountName() string {
|
||||||
|
return f.accountName
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeHMAC
|
||||||
|
func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) {
|
||||||
|
bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value)
|
||||||
|
h := hmac.New(sha256.New, bytes)
|
||||||
|
h.Write([]byte(message))
|
||||||
|
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Private method to return important parameters for NewSASQueryParameters
|
||||||
|
func (f UserDelegationCredential) getUDKParams() *UserDelegationKey {
|
||||||
|
return &f.accountKey
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
package azblob
|
||||||
|
|
||||||
|
const serviceLibVersion = "0.7"
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"crypto/hmac"
|
"crypto/hmac"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -17,12 +18,12 @@ import (
|
||||||
|
|
||||||
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
|
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
|
||||||
// storage account's name and either its primary or secondary key.
|
// storage account's name and either its primary or secondary key.
|
||||||
func NewSharedKeyCredential(accountName, accountKey string) *SharedKeyCredential {
|
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
|
||||||
bytes, err := base64.StdEncoding.DecodeString(accountKey)
|
bytes, err := base64.StdEncoding.DecodeString(accountKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
return &SharedKeyCredential{}, err
|
||||||
}
|
}
|
||||||
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}
|
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SharedKeyCredential contains an account's name and its primary or secondary key.
|
// SharedKeyCredential contains an account's name and its primary or secondary key.
|
||||||
|
@ -38,6 +39,15 @@ func (f SharedKeyCredential) AccountName() string {
|
||||||
return f.accountName
|
return f.accountName
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f SharedKeyCredential) getAccountKey() []byte {
|
||||||
|
return f.accountKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// noop function to satisfy StorageAccountCredential interface
|
||||||
|
func (f SharedKeyCredential) getUDKParams() *UserDelegationKey {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// New creates a credential policy object.
|
// New creates a credential policy object.
|
||||||
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||||
|
@ -45,7 +55,10 @@ func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptio
|
||||||
if d := request.Header.Get(headerXmsDate); d == "" {
|
if d := request.Header.Get(headerXmsDate); d == "" {
|
||||||
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
|
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
|
||||||
}
|
}
|
||||||
stringToSign := f.buildStringToSign(request)
|
stringToSign, err := f.buildStringToSign(request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
signature := f.ComputeHMACSHA256(stringToSign)
|
signature := f.ComputeHMACSHA256(stringToSign)
|
||||||
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
|
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
|
||||||
request.Header[headerAuthorization] = []string{authHeader}
|
request.Header[headerAuthorization] = []string{authHeader}
|
||||||
|
@ -84,13 +97,13 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
||||||
func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
|
func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
|
||||||
h := hmac.New(sha256.New, f.accountKey)
|
h := hmac.New(sha256.New, f.accountKey)
|
||||||
h.Write([]byte(message))
|
h.Write([]byte(message))
|
||||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string {
|
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) {
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||||
headers := request.Header
|
headers := request.Header
|
||||||
contentLength := headers.Get(headerContentLength)
|
contentLength := headers.Get(headerContentLength)
|
||||||
|
@ -98,6 +111,11 @@ func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string
|
||||||
contentLength = ""
|
contentLength = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
canonicalizedResource, err := f.buildCanonicalizedResource(request.URL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
stringToSign := strings.Join([]string{
|
stringToSign := strings.Join([]string{
|
||||||
request.Method,
|
request.Method,
|
||||||
headers.Get(headerContentEncoding),
|
headers.Get(headerContentEncoding),
|
||||||
|
@ -112,9 +130,9 @@ func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string
|
||||||
headers.Get(headerIfUnmodifiedSince),
|
headers.Get(headerIfUnmodifiedSince),
|
||||||
headers.Get(headerRange),
|
headers.Get(headerRange),
|
||||||
buildCanonicalizedHeader(headers),
|
buildCanonicalizedHeader(headers),
|
||||||
f.buildCanonicalizedResource(request.URL),
|
canonicalizedResource,
|
||||||
}, "\n")
|
}, "\n")
|
||||||
return stringToSign
|
return stringToSign, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildCanonicalizedHeader(headers http.Header) string {
|
func buildCanonicalizedHeader(headers http.Header) string {
|
||||||
|
@ -146,7 +164,7 @@ func buildCanonicalizedHeader(headers http.Header) string {
|
||||||
return string(ch.Bytes())
|
return string(ch.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
|
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||||
cr := bytes.NewBufferString("/")
|
cr := bytes.NewBufferString("/")
|
||||||
cr.WriteString(f.accountName)
|
cr.WriteString(f.accountName)
|
||||||
|
@ -164,7 +182,7 @@ func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
|
||||||
// params is a map[string][]string; param name is key; params values is []string
|
// params is a map[string][]string; param name is key; params values is []string
|
||||||
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(params) > 0 { // There is at least 1 query parameter
|
if len(params) > 0 { // There is at least 1 query parameter
|
||||||
|
@ -183,5 +201,5 @@ func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
|
||||||
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
|
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return string(cr.Bytes())
|
return string(cr.Bytes()), nil
|
||||||
}
|
}
|
|
@ -2,6 +2,7 @@ package azblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"runtime"
|
"runtime"
|
||||||
|
@ -11,6 +12,10 @@ import (
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TokenRefresher represents a callback method that you write; this method is called periodically
|
||||||
|
// so you can refresh the token credential's value.
|
||||||
|
type TokenRefresher func(credential TokenCredential) time.Duration
|
||||||
|
|
||||||
// TokenCredential represents a token credential (which is also a pipeline.Factory).
|
// TokenCredential represents a token credential (which is also a pipeline.Factory).
|
||||||
type TokenCredential interface {
|
type TokenCredential interface {
|
||||||
Credential
|
Credential
|
||||||
|
@ -20,12 +25,15 @@ type TokenCredential interface {
|
||||||
|
|
||||||
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
|
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
|
||||||
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
|
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
|
||||||
// tokenRefresher, then the function you pass will be called immediately (so it can refresh and change the
|
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
|
||||||
// TokenCredential's token value by calling SetToken; your tokenRefresher function must return a time.Duration
|
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
|
||||||
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
|
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
|
||||||
func NewTokenCredential(initialToken string, tokenRefresher func(credential TokenCredential) time.Duration) TokenCredential {
|
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
|
||||||
|
// TokenCredential object from ever invoking tokenRefresher again. Also, oen way to deal with failing to refresh a
|
||||||
|
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
|
||||||
|
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
|
||||||
tc := &tokenCredential{}
|
tc := &tokenCredential{}
|
||||||
tc.SetToken(initialToken) // We dont' set it above to guarantee atomicity
|
tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
|
||||||
if tokenRefresher == nil {
|
if tokenRefresher == nil {
|
||||||
return tc // If no callback specified, return the simple tokenCredential
|
return tc // If no callback specified, return the simple tokenCredential
|
||||||
}
|
}
|
||||||
|
@ -68,7 +76,7 @@ type tokenCredential struct {
|
||||||
|
|
||||||
// The members below are only used if the user specified a tokenRefresher callback function.
|
// The members below are only used if the user specified a tokenRefresher callback function.
|
||||||
timer *time.Timer
|
timer *time.Timer
|
||||||
tokenRefresher func(c TokenCredential) time.Duration
|
tokenRefresher TokenRefresher
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
stopped bool
|
stopped bool
|
||||||
}
|
}
|
||||||
|
@ -84,7 +92,7 @@ func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
|
||||||
|
|
||||||
// startRefresh calls refresh which immediately calls tokenRefresher
|
// startRefresh calls refresh which immediately calls tokenRefresher
|
||||||
// and then starts a timer to call tokenRefresher in the future.
|
// and then starts a timer to call tokenRefresher in the future.
|
||||||
func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) time.Duration) {
|
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
|
||||||
f.tokenRefresher = tokenRefresher
|
f.tokenRefresher = tokenRefresher
|
||||||
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
||||||
f.refresh()
|
f.refresh()
|
||||||
|
@ -95,12 +103,14 @@ func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) ti
|
||||||
// in order to refresh the token again in the future.
|
// in order to refresh the token again in the future.
|
||||||
func (f *tokenCredential) refresh() {
|
func (f *tokenCredential) refresh() {
|
||||||
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
||||||
|
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
|
||||||
f.lock.Lock()
|
f.lock.Lock()
|
||||||
if !f.stopped {
|
if !f.stopped {
|
||||||
f.timer = time.AfterFunc(d, f.refresh)
|
f.timer = time.AfterFunc(d, f.refresh)
|
||||||
}
|
}
|
||||||
f.lock.Unlock()
|
f.lock.Unlock()
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// stopRefresh stops any pending timer and sets stopped field to true to prevent
|
// stopRefresh stops any pending timer and sets stopped field to true to prevent
|
||||||
// any new timer from starting.
|
// any new timer from starting.
|
||||||
|
@ -118,7 +128,8 @@ func (f *tokenCredential) stopRefresh() {
|
||||||
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||||
if request.URL.Scheme != "https" {
|
if request.URL.Scheme != "https" {
|
||||||
panic("Token credentials require a URL using the https protocol scheme.")
|
// HTTPS must be used, otherwise the tokens are at the risk of being exposed
|
||||||
|
return nil, errors.New("token credentials require a URL using the https protocol scheme")
|
||||||
}
|
}
|
||||||
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
|
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
|
||||||
return next.Do(ctx, request)
|
return next.Do(ctx, request)
|
|
@ -1,4 +1,4 @@
|
||||||
// +build linux darwin freebsd
|
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||||
|
|
||||||
package azblob
|
package azblob
|
||||||
|
|
||||||
|
@ -22,6 +22,6 @@ func (m *mmf) unmap() {
|
||||||
err := syscall.Munmap(*m)
|
err := syscall.Munmap(*m)
|
||||||
*m = nil
|
*m = nil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -33,6 +33,6 @@ func (m *mmf) unmap() {
|
||||||
*m = mmf{}
|
*m = mmf{}
|
||||||
err := syscall.UnmapViewOfFile(addr)
|
err := syscall.UnmapViewOfFile(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -17,14 +17,13 @@ type PipelineOptions struct {
|
||||||
|
|
||||||
// Telemetry configures the built-in telemetry policy behavior.
|
// Telemetry configures the built-in telemetry policy behavior.
|
||||||
Telemetry TelemetryOptions
|
Telemetry TelemetryOptions
|
||||||
|
|
||||||
|
// HTTPSender configures the sender of HTTP requests
|
||||||
|
HTTPSender pipeline.Factory
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPipeline creates a Pipeline using the specified credentials and options.
|
// NewPipeline creates a Pipeline using the specified credentials and options.
|
||||||
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
|
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
|
||||||
if c == nil {
|
|
||||||
panic("c can't be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Closest to API goes first; closest to the wire goes last
|
// Closest to API goes first; closest to the wire goes last
|
||||||
f := []pipeline.Factory{
|
f := []pipeline.Factory{
|
||||||
NewTelemetryPolicyFactory(o.Telemetry),
|
NewTelemetryPolicyFactory(o.Telemetry),
|
||||||
|
@ -39,8 +38,9 @@ func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
|
||||||
f = append(f, c)
|
f = append(f, c)
|
||||||
}
|
}
|
||||||
f = append(f,
|
f = append(f,
|
||||||
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
|
NewRequestLogPolicyFactory(o.RequestLog),
|
||||||
NewRequestLogPolicyFactory(o.RequestLog))
|
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
|
||||||
|
|
||||||
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
|
|
||||||
|
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
|
||||||
}
|
}
|
|
@ -109,7 +109,8 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func redactSigQueryParam(rawQuery string) (bool, string) {
|
// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
|
||||||
|
func RedactSigQueryParam(rawQuery string) (bool, string) {
|
||||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
||||||
sigFound := strings.Contains(rawQuery, "?sig=")
|
sigFound := strings.Contains(rawQuery, "?sig=")
|
||||||
if !sigFound {
|
if !sigFound {
|
||||||
|
@ -130,12 +131,13 @@ func redactSigQueryParam(rawQuery string) (bool, string) {
|
||||||
|
|
||||||
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
||||||
req := request
|
req := request
|
||||||
if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
|
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
|
||||||
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
||||||
req = request.Copy()
|
req = request.Copy()
|
||||||
req.Request.URL.RawQuery = rawQuery
|
req.Request.URL.RawQuery = rawQuery
|
||||||
}
|
}
|
||||||
return req.Request
|
|
||||||
|
return prepareRequestForServiceLogging(req)
|
||||||
}
|
}
|
||||||
|
|
||||||
func stack() []byte {
|
func stack() []byte {
|
||||||
|
@ -148,3 +150,33 @@ func stack() []byte {
|
||||||
buf = make([]byte, 2*len(buf))
|
buf = make([]byte, 2*len(buf))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Redact phase useful for blob and file service only. For other services,
|
||||||
|
// this method can directly return request.Request.
|
||||||
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
|
||||||
|
req := request
|
||||||
|
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
|
||||||
|
req = request.Copy()
|
||||||
|
url, err := url.Parse(req.Header.Get(key))
|
||||||
|
if err == nil {
|
||||||
|
if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
|
||||||
|
url.RawQuery = rawQuery
|
||||||
|
req.Header.Set(xMsCopySourceHeader, url.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return req.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
const xMsCopySourceHeader = "x-ms-copy-source"
|
||||||
|
|
||||||
|
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
|
||||||
|
for keyInHeader := range header {
|
||||||
|
if strings.EqualFold(keyInHeader, key) {
|
||||||
|
return true, keyInHeader
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, ""
|
||||||
|
}
|
|
@ -2,15 +2,17 @@ package azblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
"io/ioutil"
|
|
||||||
"io"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||||
|
@ -66,21 +68,12 @@ func (o RetryOptions) retryReadsFromSecondaryHost() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o RetryOptions) defaults() RetryOptions {
|
func (o RetryOptions) defaults() RetryOptions {
|
||||||
if o.Policy != RetryPolicyExponential && o.Policy != RetryPolicyFixed {
|
// We assume the following:
|
||||||
panic("RetryPolicy must be RetryPolicyExponential or RetryPolicyFixed")
|
// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
|
||||||
}
|
// 2. o.MaxTries >= 0
|
||||||
if o.MaxTries < 0 {
|
// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
|
||||||
panic("MaxTries must be >= 0")
|
// 4. o.RetryDelay <= o.MaxRetryDelay
|
||||||
}
|
// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
|
||||||
if o.TryTimeout < 0 || o.RetryDelay < 0 || o.MaxRetryDelay < 0 {
|
|
||||||
panic("TryTimeout, RetryDelay, and MaxRetryDelay must all be >= 0")
|
|
||||||
}
|
|
||||||
if o.RetryDelay > o.MaxRetryDelay {
|
|
||||||
panic("RetryDelay must be <= MaxRetryDelay")
|
|
||||||
}
|
|
||||||
if (o.RetryDelay == 0 && o.MaxRetryDelay != 0) || (o.RetryDelay != 0 && o.MaxRetryDelay == 0) {
|
|
||||||
panic("Both RetryDelay and MaxRetryDelay must be 0 or neither can be 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
IfDefault := func(current *time.Duration, desired time.Duration) {
|
IfDefault := func(current *time.Duration, desired time.Duration) {
|
||||||
if *current == time.Duration(0) {
|
if *current == time.Duration(0) {
|
||||||
|
@ -127,7 +120,8 @@ func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never
|
||||||
}
|
}
|
||||||
|
|
||||||
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
|
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
|
||||||
delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
|
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||||
|
delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
|
||||||
if delay > o.MaxRetryDelay {
|
if delay > o.MaxRetryDelay {
|
||||||
delay = o.MaxRetryDelay
|
delay = o.MaxRetryDelay
|
||||||
}
|
}
|
||||||
|
@ -164,7 +158,8 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||||
logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
|
logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
|
||||||
time.Sleep(delay) // The 1st try returns 0 delay
|
time.Sleep(delay) // The 1st try returns 0 delay
|
||||||
} else {
|
} else {
|
||||||
delay := time.Second * time.Duration(rand.Float32()/2+0.8)
|
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||||
|
delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
|
||||||
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
|
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
|
||||||
time.Sleep(delay) // Delay with some jitter before trying secondary
|
time.Sleep(delay) // Delay with some jitter before trying secondary
|
||||||
}
|
}
|
||||||
|
@ -175,11 +170,14 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||||
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
|
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
|
||||||
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
|
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
|
||||||
// 1st try as for additional tries.
|
// 1st try as for additional tries.
|
||||||
if err = requestCopy.RewindBody(); err != nil {
|
err = requestCopy.RewindBody()
|
||||||
panic(err)
|
if err != nil {
|
||||||
|
return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !tryingPrimary {
|
if !tryingPrimary {
|
||||||
requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
|
requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
|
||||||
|
requestCopy.Host = o.retryReadsFromSecondaryHost()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the server-side timeout query parameter "timeout=[seconds]"
|
// Set the server-side timeout query parameter "timeout=[seconds]"
|
||||||
|
@ -214,16 +212,34 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||||
switch {
|
switch {
|
||||||
case ctx.Err() != nil:
|
case ctx.Err() != nil:
|
||||||
action = "NoRetry: Op timeout"
|
action = "NoRetry: Op timeout"
|
||||||
case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound:
|
case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
|
||||||
// If attempt was against the secondary & it returned a StatusNotFound (404), then
|
// If attempt was against the secondary & it returned a StatusNotFound (404), then
|
||||||
// the resource was not found. This may be due to replication delay. So, in this
|
// the resource was not found. This may be due to replication delay. So, in this
|
||||||
// case, we'll never try the secondary again for this operation.
|
// case, we'll never try the secondary again for this operation.
|
||||||
considerSecondary = false
|
considerSecondary = false
|
||||||
action = "Retry: Secondary URL returned 404"
|
action = "Retry: Secondary URL returned 404"
|
||||||
case err != nil:
|
case err != nil:
|
||||||
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
|
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
|
||||||
if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
|
// Use ServiceCode to verify if the error is related to storage service-side,
|
||||||
action = "Retry: net.Error and Temporary() or Timeout()"
|
// ServiceCode is set only when error related to storage service happened.
|
||||||
|
if stErr, ok := err.(StorageError); ok {
|
||||||
|
if stErr.Temporary() {
|
||||||
|
action = "Retry: StorageError with error service code and Temporary()"
|
||||||
|
} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError
|
||||||
|
action = "Retry: StorageError with success status code"
|
||||||
|
} else {
|
||||||
|
action = "NoRetry: StorageError not Temporary() and without retriable status code"
|
||||||
|
}
|
||||||
|
} else if netErr, ok := err.(net.Error); ok {
|
||||||
|
// Use non-retriable net.Error list, but not retriable list.
|
||||||
|
// As there are errors without Temporary() implementation,
|
||||||
|
// while need be retried, like 'connection reset by peer', 'transport connection broken' and etc.
|
||||||
|
// So the SDK do retry for most of the case, unless the error should not be retried for sure.
|
||||||
|
if !isNotRetriable(netErr) {
|
||||||
|
action = "Retry: net.Error and not in the non-retriable list"
|
||||||
|
} else {
|
||||||
|
action = "NoRetry: net.Error and in the non-retriable list"
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
action = "NoRetry: unrecognized error"
|
action = "NoRetry: unrecognized error"
|
||||||
}
|
}
|
||||||
|
@ -237,11 +253,17 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
|
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
|
||||||
} else {
|
} else {
|
||||||
// TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled.
|
// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
|
||||||
// Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
|
|
||||||
// So, when the user closes the Body, the our per-try context gets closed too.
|
// So, when the user closes the Body, the our per-try context gets closed too.
|
||||||
// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
|
// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
|
||||||
_ = tryCancel // So, for now, we don't call cancel: cancel()
|
if response == nil || response.Response() == nil {
|
||||||
|
// We do panic in the case response or response.Response() is nil,
|
||||||
|
// as for client, the response should not be nil if request is sent and the operations is executed successfully.
|
||||||
|
// Another option, is that execute the cancel function when response or response.Response() is nil,
|
||||||
|
// as in this case, current per-try has nothing to do in future.
|
||||||
|
return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
|
||||||
|
}
|
||||||
|
response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
|
||||||
}
|
}
|
||||||
break // Don't retry
|
break // Don't retry
|
||||||
}
|
}
|
||||||
|
@ -259,6 +281,78 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
||||||
|
type contextCancelReadCloser struct {
|
||||||
|
cf context.CancelFunc
|
||||||
|
body io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
|
||||||
|
return rc.body.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rc *contextCancelReadCloser) Close() error {
|
||||||
|
err := rc.body.Close()
|
||||||
|
if rc.cf != nil {
|
||||||
|
rc.cf()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// isNotRetriable checks if the provided net.Error isn't retriable.
|
||||||
|
func isNotRetriable(errToParse net.Error) bool {
|
||||||
|
// No error, so this is NOT retriable.
|
||||||
|
if errToParse == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// The error is either temporary or a timeout so it IS retriable (not not retriable).
|
||||||
|
if errToParse.Temporary() || errToParse.Timeout() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
genericErr := error(errToParse)
|
||||||
|
|
||||||
|
// From here all the error are neither Temporary() nor Timeout().
|
||||||
|
switch err := errToParse.(type) {
|
||||||
|
case *net.OpError:
|
||||||
|
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
|
||||||
|
if err.Err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
genericErr = err.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch genericErr.(type) {
|
||||||
|
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
|
||||||
|
// If the error is one of the ones listed, then it is NOT retriable.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
|
||||||
|
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
||||||
|
if strings.Contains(genericErr.Error(), "invalid header field") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assume the error is retriable.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
|
||||||
|
|
||||||
|
func isSuccessStatusCode(resp *http.Response) bool {
|
||||||
|
if resp == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, i := range successStatusCodes {
|
||||||
|
if i == resp.StatusCode {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
|
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
|
||||||
var logf = func(format string, a ...interface{}) {}
|
var logf = func(format string, a ...interface{}) {}
|
||||||
|
|
178
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
generated
vendored
Normal file
178
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
package azblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const CountToEnd = 0
|
||||||
|
|
||||||
|
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
|
||||||
|
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
|
||||||
|
|
||||||
|
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
|
||||||
|
// that should be used to make an HTTP GET request.
|
||||||
|
type HTTPGetterInfo struct {
|
||||||
|
// Offset specifies the start offset that should be used when
|
||||||
|
// creating the HTTP GET request's Range header
|
||||||
|
Offset int64
|
||||||
|
|
||||||
|
// Count specifies the count of bytes that should be used to calculate
|
||||||
|
// the end offset when creating the HTTP GET request's Range header
|
||||||
|
Count int64
|
||||||
|
|
||||||
|
// ETag specifies the resource's etag that should be used when creating
|
||||||
|
// the HTTP GET request's If-Match header
|
||||||
|
ETag ETag
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedReadNotifier is a function type that represents the notification function called when a read fails
|
||||||
|
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
|
||||||
|
|
||||||
|
// RetryReaderOptions contains properties which can help to decide when to do retry.
|
||||||
|
type RetryReaderOptions struct {
|
||||||
|
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
|
||||||
|
// while reading from a RetryReader. A value of zero means that no additional HTTP
|
||||||
|
// GET requests will be made.
|
||||||
|
MaxRetryRequests int
|
||||||
|
doInjectError bool
|
||||||
|
doInjectErrorRound int
|
||||||
|
|
||||||
|
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
|
||||||
|
NotifyFailedRead FailedReadNotifier
|
||||||
|
|
||||||
|
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
|
||||||
|
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
|
||||||
|
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the =
|
||||||
|
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
|
||||||
|
// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
|
||||||
|
// treated as a fatal (non-retryable) error.
|
||||||
|
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
|
||||||
|
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
|
||||||
|
// which will be retried.
|
||||||
|
TreatEarlyCloseAsError bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryReader implements io.ReaderCloser methods.
|
||||||
|
// retryReader tries to read from response, and if there is retriable network error
|
||||||
|
// returned during reading, it will retry according to retry reader option through executing
|
||||||
|
// user defined action with provided data to get a new response, and continue the overall reading process
|
||||||
|
// through reading from the new response.
|
||||||
|
type retryReader struct {
|
||||||
|
ctx context.Context
|
||||||
|
info HTTPGetterInfo
|
||||||
|
countWasBounded bool
|
||||||
|
o RetryReaderOptions
|
||||||
|
getter HTTPGetter
|
||||||
|
|
||||||
|
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
|
||||||
|
responseMu *sync.Mutex
|
||||||
|
response *http.Response
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRetryReader creates a retry reader.
|
||||||
|
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
|
||||||
|
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
|
||||||
|
return &retryReader{
|
||||||
|
ctx: ctx,
|
||||||
|
getter: getter,
|
||||||
|
info: info,
|
||||||
|
countWasBounded: info.Count != CountToEnd,
|
||||||
|
response: initialResponse,
|
||||||
|
responseMu: &sync.Mutex{},
|
||||||
|
o: o}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *retryReader) setResponse(r *http.Response) {
|
||||||
|
s.responseMu.Lock()
|
||||||
|
defer s.responseMu.Unlock()
|
||||||
|
s.response = r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||||
|
for try := 0; ; try++ {
|
||||||
|
//fmt.Println(try) // Comment out for debugging.
|
||||||
|
if s.countWasBounded && s.info.Count == CountToEnd {
|
||||||
|
// User specified an original count and the remaining bytes are 0, return 0, EOF
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
s.responseMu.Lock()
|
||||||
|
resp := s.response
|
||||||
|
s.responseMu.Unlock()
|
||||||
|
if resp == nil { // We don't have a response stream to read from, try to get one.
|
||||||
|
newResponse, err := s.getter(s.ctx, s.info)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
// Successful GET; this is the network stream we'll read from.
|
||||||
|
s.setResponse(newResponse)
|
||||||
|
resp = newResponse
|
||||||
|
}
|
||||||
|
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
|
||||||
|
|
||||||
|
// Injection mechanism for testing.
|
||||||
|
if s.o.doInjectError && try == s.o.doInjectErrorRound {
|
||||||
|
err = &net.DNSError{IsTemporary: true}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We successfully read data or end EOF.
|
||||||
|
if err == nil || err == io.EOF {
|
||||||
|
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
|
||||||
|
if s.info.Count != CountToEnd {
|
||||||
|
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
|
||||||
|
}
|
||||||
|
return n, err // Return the return to the caller
|
||||||
|
}
|
||||||
|
s.Close() // Error, close stream
|
||||||
|
s.setResponse(nil) // Our stream is no longer good
|
||||||
|
|
||||||
|
// Check the retry count and error code, and decide whether to retry.
|
||||||
|
retriesExhausted := try >= s.o.MaxRetryRequests
|
||||||
|
_, isNetError := err.(net.Error)
|
||||||
|
willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
|
||||||
|
|
||||||
|
// Notify, for logging purposes, of any failures
|
||||||
|
if s.o.NotifyFailedRead != nil {
|
||||||
|
failureCount := try + 1 // because try is zero-based
|
||||||
|
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
|
||||||
|
}
|
||||||
|
|
||||||
|
if willRetry {
|
||||||
|
continue
|
||||||
|
// Loop around and try to get and read from new stream.
|
||||||
|
}
|
||||||
|
return n, err // Not retryable, or retries exhausted, so just return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
|
||||||
|
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
|
||||||
|
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
|
||||||
|
// which is exactly the behaviour we want.
|
||||||
|
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
|
||||||
|
// then there are two different types of error that may happen - either the one one we check for here,
|
||||||
|
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
|
||||||
|
// to check for one, since the other is a net.Error, which our main Read retry loop is already handing.
|
||||||
|
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
|
||||||
|
if s.o.TreatEarlyCloseAsError {
|
||||||
|
return false // user wants all early closes to be errors, and so not retryable
|
||||||
|
}
|
||||||
|
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
|
||||||
|
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
const ReadOnClosedBodyMessage = "read on closed response body"
|
||||||
|
|
||||||
|
func (s *retryReader) Close() error {
|
||||||
|
s.responseMu.Lock()
|
||||||
|
defer s.responseMu.Unlock()
|
||||||
|
if s.response != nil && s.response.Body != nil {
|
||||||
|
return s.response.Body.Close()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -2,6 +2,7 @@ package azblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -22,21 +23,21 @@ type AccountSASSignatureValues struct {
|
||||||
|
|
||||||
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
||||||
// the proper SAS query parameters.
|
// the proper SAS query parameters.
|
||||||
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
|
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
|
||||||
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
|
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
|
||||||
panic("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
|
return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
|
||||||
}
|
}
|
||||||
if v.Version == "" {
|
if v.Version == "" {
|
||||||
v.Version = SASVersion
|
v.Version = SASVersion
|
||||||
}
|
}
|
||||||
perms := &AccountSASPermissions{}
|
perms := &AccountSASPermissions{}
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
if err := perms.Parse(v.Permissions); err != nil {
|
||||||
panic(err)
|
return SASQueryParameters{}, err
|
||||||
}
|
}
|
||||||
v.Permissions = perms.String()
|
v.Permissions = perms.String()
|
||||||
|
|
||||||
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
|
startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
|
||||||
|
|
||||||
stringToSign := strings.Join([]string{
|
stringToSign := strings.Join([]string{
|
||||||
sharedKeyCredential.AccountName(),
|
sharedKeyCredential.AccountName(),
|
||||||
|
@ -68,7 +69,8 @@ func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Sh
|
||||||
// Calculated SAS signature
|
// Calculated SAS signature
|
||||||
signature: signature,
|
signature: signature,
|
||||||
}
|
}
|
||||||
return p
|
|
||||||
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
|
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
|
||||||
|
@ -205,7 +207,7 @@ func (rt *AccountSASResourceTypes) Parse(s string) error {
|
||||||
switch r {
|
switch r {
|
||||||
case 's':
|
case 's':
|
||||||
rt.Service = true
|
rt.Service = true
|
||||||
case 'q':
|
case 'c':
|
||||||
rt.Container = true
|
rt.Container = true
|
||||||
case 'o':
|
case 'o':
|
||||||
rt.Object = true
|
rt.Object = true
|
|
@ -22,7 +22,7 @@ const (
|
||||||
|
|
||||||
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
|
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
|
||||||
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
|
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
|
||||||
func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
|
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
|
||||||
ss := ""
|
ss := ""
|
||||||
if !startTime.IsZero() {
|
if !startTime.IsZero() {
|
||||||
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||||
|
@ -31,7 +31,11 @@ func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string)
|
||||||
if !expiryTime.IsZero() {
|
if !expiryTime.IsZero() {
|
||||||
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||||
}
|
}
|
||||||
return ss, se
|
sh := ""
|
||||||
|
if !snapshotTime.IsZero() {
|
||||||
|
sh = snapshotTime.Format(SnapshotTimeFormat)
|
||||||
|
}
|
||||||
|
return ss, se, sh
|
||||||
}
|
}
|
||||||
|
|
||||||
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
|
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
|
||||||
|
@ -53,11 +57,51 @@ type SASQueryParameters struct {
|
||||||
protocol SASProtocol `param:"spr"`
|
protocol SASProtocol `param:"spr"`
|
||||||
startTime time.Time `param:"st"`
|
startTime time.Time `param:"st"`
|
||||||
expiryTime time.Time `param:"se"`
|
expiryTime time.Time `param:"se"`
|
||||||
|
snapshotTime time.Time `param:"snapshot"`
|
||||||
ipRange IPRange `param:"sip"`
|
ipRange IPRange `param:"sip"`
|
||||||
identifier string `param:"si"`
|
identifier string `param:"si"`
|
||||||
resource string `param:"sr"`
|
resource string `param:"sr"`
|
||||||
permissions string `param:"sp"`
|
permissions string `param:"sp"`
|
||||||
signature string `param:"sig"`
|
signature string `param:"sig"`
|
||||||
|
cacheControl string `param:"rscc"`
|
||||||
|
contentDisposition string `param:"rscd"`
|
||||||
|
contentEncoding string `param:"rsce"`
|
||||||
|
contentLanguage string `param:"rscl"`
|
||||||
|
contentType string `param:"rsct"`
|
||||||
|
signedOid string `param:"skoid"`
|
||||||
|
signedTid string `param:"sktid"`
|
||||||
|
signedStart time.Time `param:"skt"`
|
||||||
|
signedExpiry time.Time `param:"ske"`
|
||||||
|
signedService string `param:"sks"`
|
||||||
|
signedVersion string `param:"skv"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedOid() string {
|
||||||
|
return p.signedOid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedTid() string {
|
||||||
|
return p.signedTid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedStart() time.Time {
|
||||||
|
return p.signedStart
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedExpiry() time.Time {
|
||||||
|
return p.signedExpiry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedService() string {
|
||||||
|
return p.signedService
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SignedVersion() string {
|
||||||
|
return p.signedVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) SnapshotTime() time.Time {
|
||||||
|
return p.snapshotTime
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *SASQueryParameters) Version() string {
|
func (p *SASQueryParameters) Version() string {
|
||||||
|
@ -99,6 +143,26 @@ func (p *SASQueryParameters) Signature() string {
|
||||||
return p.signature
|
return p.signature
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) CacheControl() string {
|
||||||
|
return p.cacheControl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) ContentDisposition() string {
|
||||||
|
return p.contentDisposition
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) ContentEncoding() string {
|
||||||
|
return p.contentEncoding
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) ContentLanguage() string {
|
||||||
|
return p.contentLanguage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SASQueryParameters) ContentType() string {
|
||||||
|
return p.contentType
|
||||||
|
}
|
||||||
|
|
||||||
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
|
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
|
||||||
type IPRange struct {
|
type IPRange struct {
|
||||||
Start net.IP // Not specified if length = 0
|
Start net.IP // Not specified if length = 0
|
||||||
|
@ -135,6 +199,8 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
|
||||||
p.resourceTypes = val
|
p.resourceTypes = val
|
||||||
case "spr":
|
case "spr":
|
||||||
p.protocol = SASProtocol(val)
|
p.protocol = SASProtocol(val)
|
||||||
|
case "snapshot":
|
||||||
|
p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
|
||||||
case "st":
|
case "st":
|
||||||
p.startTime, _ = time.Parse(SASTimeFormat, val)
|
p.startTime, _ = time.Parse(SASTimeFormat, val)
|
||||||
case "se":
|
case "se":
|
||||||
|
@ -155,6 +221,28 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
|
||||||
p.permissions = val
|
p.permissions = val
|
||||||
case "sig":
|
case "sig":
|
||||||
p.signature = val
|
p.signature = val
|
||||||
|
case "rscc":
|
||||||
|
p.cacheControl = val
|
||||||
|
case "rscd":
|
||||||
|
p.contentDisposition = val
|
||||||
|
case "rsce":
|
||||||
|
p.contentEncoding = val
|
||||||
|
case "rscl":
|
||||||
|
p.contentLanguage = val
|
||||||
|
case "rsct":
|
||||||
|
p.contentType = val
|
||||||
|
case "skoid":
|
||||||
|
p.signedOid = val
|
||||||
|
case "sktid":
|
||||||
|
p.signedTid = val
|
||||||
|
case "skt":
|
||||||
|
p.signedStart, _ = time.Parse(SASTimeFormat, val)
|
||||||
|
case "ske":
|
||||||
|
p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
|
||||||
|
case "sks":
|
||||||
|
p.signedService = val
|
||||||
|
case "skv":
|
||||||
|
p.signedVersion = val
|
||||||
default:
|
default:
|
||||||
isSASKey = false // We didn't recognize the query parameter
|
isSASKey = false // We didn't recognize the query parameter
|
||||||
}
|
}
|
||||||
|
@ -197,9 +285,32 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
|
||||||
if p.permissions != "" {
|
if p.permissions != "" {
|
||||||
v.Add("sp", p.permissions)
|
v.Add("sp", p.permissions)
|
||||||
}
|
}
|
||||||
|
if p.signedOid != "" {
|
||||||
|
v.Add("skoid", p.signedOid)
|
||||||
|
v.Add("sktid", p.signedTid)
|
||||||
|
v.Add("skt", p.signedStart.Format(SASTimeFormat))
|
||||||
|
v.Add("ske", p.signedExpiry.Format(SASTimeFormat))
|
||||||
|
v.Add("sks", p.signedService)
|
||||||
|
v.Add("skv", p.signedVersion)
|
||||||
|
}
|
||||||
if p.signature != "" {
|
if p.signature != "" {
|
||||||
v.Add("sig", p.signature)
|
v.Add("sig", p.signature)
|
||||||
}
|
}
|
||||||
|
if p.cacheControl != "" {
|
||||||
|
v.Add("rscc", p.cacheControl)
|
||||||
|
}
|
||||||
|
if p.contentDisposition != "" {
|
||||||
|
v.Add("rscd", p.contentDisposition)
|
||||||
|
}
|
||||||
|
if p.contentEncoding != "" {
|
||||||
|
v.Add("rsce", p.contentEncoding)
|
||||||
|
}
|
||||||
|
if p.contentLanguage != "" {
|
||||||
|
v.Add("rscl", p.contentLanguage)
|
||||||
|
}
|
||||||
|
if p.contentType != "" {
|
||||||
|
v.Add("rsct", p.contentType)
|
||||||
|
}
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,11 +43,14 @@ func newStorageError(cause error, response *http.Response, description string) e
|
||||||
response: response,
|
response: response,
|
||||||
description: description,
|
description: description,
|
||||||
},
|
},
|
||||||
|
serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
|
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
|
||||||
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode }
|
func (e *storageError) ServiceCode() ServiceCodeType {
|
||||||
|
return e.serviceCode
|
||||||
|
}
|
||||||
|
|
||||||
// Error implements the error interface's Error method to return a string representation of the error.
|
// Error implements the error interface's Error method to return a string representation of the error.
|
||||||
func (e *storageError) Error() string {
|
func (e *storageError) Error() string {
|
||||||
|
@ -94,8 +97,6 @@ func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err
|
||||||
break
|
break
|
||||||
case xml.CharData:
|
case xml.CharData:
|
||||||
switch tokName {
|
switch tokName {
|
||||||
case "Code":
|
|
||||||
e.serviceCode = ServiceCodeType(tt)
|
|
||||||
case "Message":
|
case "Message":
|
||||||
e.description = string(tt)
|
e.description = string(tt)
|
||||||
default:
|
default:
|
|
@ -16,16 +16,10 @@ type httpRange struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r httpRange) pointers() *string {
|
func (r httpRange) pointers() *string {
|
||||||
if r.offset == 0 && r.count == 0 { // Do common case first for performance
|
if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
|
||||||
return nil // No specified range
|
return nil // No specified range
|
||||||
}
|
}
|
||||||
if r.offset < 0 {
|
endOffset := "" // if count == CountToEnd (0)
|
||||||
panic("The range offset must be >= 0")
|
|
||||||
}
|
|
||||||
if r.count < 0 {
|
|
||||||
panic("The range count must be >= 0")
|
|
||||||
}
|
|
||||||
endOffset := "" // if count == 0
|
|
||||||
if r.count > 0 {
|
if r.count > 0 {
|
||||||
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
|
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
|
||||||
}
|
}
|
||||||
|
@ -35,27 +29,36 @@ func (r httpRange) pointers() *string {
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 {
|
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
|
||||||
if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long
|
if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long
|
||||||
return 0
|
return 0, nil
|
||||||
}
|
|
||||||
validateSeekableStreamAt0(body)
|
|
||||||
count, err := body.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
panic("failed to seek stream")
|
|
||||||
}
|
|
||||||
body.Seek(0, io.SeekStart)
|
|
||||||
return count
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateSeekableStreamAt0(body io.ReadSeeker) {
|
err := validateSeekableStreamAt0(body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := body.Seek(0, io.SeekEnd)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.New("body stream must be seekable")
|
||||||
|
}
|
||||||
|
|
||||||
|
body.Seek(0, io.SeekStart)
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// return an error if body is not a valid seekable stream at 0
|
||||||
|
func validateSeekableStreamAt0(body io.ReadSeeker) error {
|
||||||
if body == nil { // nil body's are "logically" seekable to 0
|
if body == nil { // nil body's are "logically" seekable to 0
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
|
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
|
||||||
|
// Help detect programmer error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
return errors.New("body stream must be seekable")
|
||||||
}
|
}
|
||||||
panic(errors.New("stream must be set to position 0"))
|
return errors.New("body stream must be set to position 0")
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
|
@ -21,10 +21,7 @@ type uuid [16]byte
|
||||||
func newUUID() (u uuid) {
|
func newUUID() (u uuid) {
|
||||||
u = uuid{}
|
u = uuid{}
|
||||||
// Set all bits to randomly (or pseudo-randomly) chosen values.
|
// Set all bits to randomly (or pseudo-randomly) chosen values.
|
||||||
_, err := rand.Read(u[:])
|
rand.Read(u[:])
|
||||||
if err != nil {
|
|
||||||
panic("ran.Read failed")
|
|
||||||
}
|
|
||||||
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
|
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
|
||||||
|
|
||||||
var version byte = 4
|
var version byte = 4
|
|
@ -33,20 +33,21 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
|
||||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||||
// information, see <a
|
// information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||||
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
|
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||||
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
|
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||||
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
|
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||||
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
|
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||||
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
|
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||||
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
|
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||||
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
|
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||||
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
|
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||||
// only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to
|
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||||
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// in the analytics logs when storage analytics logging is enabled.
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
// logs when storage analytics logging is enabled.
|
||||||
|
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: body,
|
{targetValue: body,
|
||||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||||
|
@ -55,7 +56,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -67,7 +68,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||||
}
|
}
|
||||||
|
|
||||||
// appendBlockPreparer prepares the AppendBlock request.
|
// appendBlockPreparer prepares the AppendBlock request.
|
||||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -79,6 +80,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
|
||||||
params.Set("comp", "appendblock")
|
params.Set("comp", "appendblock")
|
||||||
req.URL.RawQuery = params.Encode()
|
req.URL.RawQuery = params.Encode()
|
||||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
|
if transactionalContentMD5 != nil {
|
||||||
|
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||||
|
}
|
||||||
if leaseID != nil {
|
if leaseID != nil {
|
||||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
}
|
}
|
||||||
|
@ -94,8 +98,8 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -118,6 +122,120 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
|
||||||
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
|
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob
|
||||||
|
// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created
|
||||||
|
// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
|
||||||
|
//
|
||||||
|
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
|
||||||
|
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
|
||||||
|
// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
|
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
|
||||||
|
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
|
||||||
|
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
|
||||||
|
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
|
||||||
|
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
|
||||||
|
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
|
||||||
|
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
|
||||||
|
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
|
||||||
|
// only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to
|
||||||
|
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||||
|
// matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified
|
||||||
|
// since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it
|
||||||
|
// has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs
|
||||||
|
// with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
|
// logs when storage analytics logging is enabled.
|
||||||
|
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||||
|
if err := validate([]validation{
|
||||||
|
{targetValue: timeout,
|
||||||
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*AppendBlobAppendBlockFromURLResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
|
||||||
|
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
if timeout != nil {
|
||||||
|
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||||
|
}
|
||||||
|
params.Set("comp", "appendblock")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||||
|
if sourceRange != nil {
|
||||||
|
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||||
|
}
|
||||||
|
if sourceContentMD5 != nil {
|
||||||
|
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
|
if leaseID != nil {
|
||||||
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
|
}
|
||||||
|
if maxSize != nil {
|
||||||
|
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
|
||||||
|
}
|
||||||
|
if appendPosition != nil {
|
||||||
|
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
|
||||||
|
}
|
||||||
|
if ifModifiedSince != nil {
|
||||||
|
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifMatch != nil {
|
||||||
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
|
}
|
||||||
|
if ifNoneMatch != nil {
|
||||||
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
}
|
||||||
|
if sourceIfModifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
|
||||||
|
}
|
||||||
|
if sourceIfNoneMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
if requestID != nil {
|
||||||
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
|
}
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request.
|
||||||
|
func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
||||||
|
|
||||||
// Create the Create Append Blob operation creates a new append blob.
|
// Create the Create Append Blob operation creates a new append blob.
|
||||||
//
|
//
|
||||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||||
|
@ -135,25 +253,22 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
|
||||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
|
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
// analytics logging is enabled.
|
// analytics logging is enabled.
|
||||||
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
|
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -165,7 +280,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
|
||||||
}
|
}
|
||||||
|
|
||||||
// createPreparer prepares the Create request.
|
// createPreparer prepares the Create request.
|
||||||
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -208,8 +323,8 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
|
@ -6,14 +6,13 @@ package azblob
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// blobClient is the client for the Blob methods of the Azblob service.
|
// blobClient is the client for the Blob methods of the Azblob service.
|
||||||
|
@ -32,7 +31,7 @@ func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient {
|
||||||
// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is
|
// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is
|
||||||
// the timeout parameter is expressed in seconds. For more information, see <a
|
// the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) {
|
func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) {
|
||||||
|
@ -99,18 +98,18 @@ func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipe
|
||||||
// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor
|
// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor
|
||||||
// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a
|
// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) {
|
func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -122,7 +121,7 @@ func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, durat
|
||||||
}
|
}
|
||||||
|
|
||||||
// acquireLeasePreparer prepares the AcquireLease request.
|
// acquireLeasePreparer prepares the AcquireLease request.
|
||||||
func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -145,8 +144,8 @@ func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, p
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -182,18 +181,18 @@ func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline
|
||||||
// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an
|
// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an
|
||||||
// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has
|
// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has
|
||||||
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// analytics logs when storage analytics logging is enabled.
|
// logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) {
|
func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -205,7 +204,7 @@ func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPe
|
||||||
}
|
}
|
||||||
|
|
||||||
// breakLeasePreparer prepares the BreakLease request.
|
// breakLeasePreparer prepares the BreakLease request.
|
||||||
func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -225,8 +224,8 @@ func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32,
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -253,25 +252,25 @@ func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.R
|
||||||
// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
||||||
// operations
|
// operations
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string
|
||||||
// proposedLeaseID is proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the
|
// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See
|
||||||
// proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string
|
// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in
|
||||||
// formats. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate
|
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
|
||||||
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||||
// in the analytics logs when storage analytics logging is enabled.
|
// in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) {
|
func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -283,7 +282,7 @@ func (client blobClient) ChangeLease(ctx context.Context, leaseID string, propos
|
||||||
}
|
}
|
||||||
|
|
||||||
// changeLeasePreparer prepares the ChangeLease request.
|
// changeLeasePreparer prepares the ChangeLease request.
|
||||||
func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -302,8 +301,8 @@ func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID str
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -327,6 +326,111 @@ func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.
|
||||||
return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err
|
return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CopyFromURL the Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a
|
||||||
|
// response until the copy is complete.
|
||||||
|
//
|
||||||
|
// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
|
||||||
|
// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
|
||||||
|
// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
|
||||||
|
// expressed in seconds. For more information, see <a
|
||||||
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
|
// Timeouts for Blob Service Operations.</a> metadata is optional. Specifies a user-defined name-value pair associated
|
||||||
|
// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or
|
||||||
|
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
|
||||||
|
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
||||||
|
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||||
|
// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
|
||||||
|
// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
|
||||||
|
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
|
||||||
|
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
|
||||||
|
// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
|
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
|
// logs when storage analytics logging is enabled.
|
||||||
|
func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCopyFromURLResponse, error) {
|
||||||
|
if err := validate([]validation{
|
||||||
|
{targetValue: timeout,
|
||||||
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req, err := client.copyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyFromURLResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*BlobCopyFromURLResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyFromURLPreparer prepares the CopyFromURL request.
|
||||||
|
func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
if timeout != nil {
|
||||||
|
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
if metadata != nil {
|
||||||
|
for k, v := range metadata {
|
||||||
|
req.Header.Set("x-ms-meta-"+k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if sourceIfModifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
|
||||||
|
}
|
||||||
|
if sourceIfNoneMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
||||||
|
}
|
||||||
|
if ifModifiedSince != nil {
|
||||||
|
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifMatch != nil {
|
||||||
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
|
}
|
||||||
|
if ifNoneMatch != nil {
|
||||||
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-copy-source", copySource)
|
||||||
|
if leaseID != nil {
|
||||||
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
if requestID != nil {
|
||||||
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-requires-sync", "true")
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyFromURLResponder handles the response to the CopyFromURL request.
|
||||||
|
func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &BlobCopyFromURLResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
||||||
|
|
||||||
// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob
|
// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob
|
||||||
//
|
//
|
||||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
|
@ -338,22 +442,19 @@ func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.
|
||||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||||
// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
|
// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||||
// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active
|
// without a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and
|
||||||
// and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
// matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
// in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
|
func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID)
|
req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -365,7 +466,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met
|
||||||
}
|
}
|
||||||
|
|
||||||
// createSnapshotPreparer prepares the CreateSnapshot request.
|
// createSnapshotPreparer prepares the CreateSnapshot request.
|
||||||
func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -387,8 +488,8 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -430,23 +531,23 @@ func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeli
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
||||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one
|
// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one
|
||||||
// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's
|
// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's
|
||||||
// snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has
|
// snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has
|
||||||
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// analytics logs when storage analytics logging is enabled.
|
// logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) {
|
func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -458,7 +559,7 @@ func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *
|
||||||
}
|
}
|
||||||
|
|
||||||
// deletePreparer prepares the Delete request.
|
// deletePreparer prepares the Delete request.
|
||||||
func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("DELETE", client.url, nil)
|
req, err := pipeline.NewRequest("DELETE", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -483,8 +584,8 @@ func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseI
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -516,22 +617,22 @@ func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Respo
|
||||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||||
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for
|
// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for
|
||||||
// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value
|
// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value
|
||||||
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
|
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
|
||||||
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatches is
|
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
|
||||||
// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
|
// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
|
||||||
// operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB
|
// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) {
|
func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -543,7 +644,7 @@ func (client blobClient) Download(ctx context.Context, snapshot *string, timeout
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadPreparer prepares the Download request.
|
// downloadPreparer prepares the Download request.
|
||||||
func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -571,8 +672,8 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -593,6 +694,44 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
|
||||||
return &downloadResponse{rawResponse: resp.Response()}, err
|
return &downloadResponse{rawResponse: resp.Response()}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetAccountInfo returns the sku name and account kind
|
||||||
|
func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||||
|
req, err := client.getAccountInfoPreparer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*BlobGetAccountInfoResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||||
|
func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("restype", "account")
|
||||||
|
params.Set("comp", "properties")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||||
|
func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
||||||
|
|
||||||
// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system
|
// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system
|
||||||
// properties for the blob. It does not return the content of the blob.
|
// properties for the blob. It does not return the content of the blob.
|
||||||
//
|
//
|
||||||
|
@ -601,21 +740,21 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
||||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// analytics logs when storage analytics logging is enabled.
|
// logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) {
|
func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -627,7 +766,7 @@ func (client blobClient) GetProperties(ctx context.Context, snapshot *string, ti
|
||||||
}
|
}
|
||||||
|
|
||||||
// getPropertiesPreparer prepares the GetProperties request.
|
// getPropertiesPreparer prepares the GetProperties request.
|
||||||
func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("HEAD", client.url, nil)
|
req, err := pipeline.NewRequest("HEAD", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -649,8 +788,8 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32,
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -676,23 +815,23 @@ func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipelin
|
||||||
// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
||||||
// operations
|
// operations
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout
|
// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds.
|
||||||
// is the timeout parameter is expressed in seconds. For more information, see <a
|
// For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate
|
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
|
||||||
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||||
// in the analytics logs when storage analytics logging is enabled.
|
// in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) {
|
func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -704,7 +843,7 @@ func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeo
|
||||||
}
|
}
|
||||||
|
|
||||||
// releaseLeasePreparer prepares the ReleaseLease request.
|
// releaseLeasePreparer prepares the ReleaseLease request.
|
||||||
func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -722,8 +861,8 @@ func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, if
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -750,23 +889,23 @@ func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline
|
||||||
// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
|
||||||
// operations
|
// operations
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout
|
// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds.
|
||||||
// is the timeout parameter is expressed in seconds. For more information, see <a
|
// For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate
|
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
|
||||||
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||||
// in the analytics logs when storage analytics logging is enabled.
|
// in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) {
|
func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -778,7 +917,7 @@ func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout
|
||||||
}
|
}
|
||||||
|
|
||||||
// renewLeasePreparer prepares the RenewLease request.
|
// renewLeasePreparer prepares the RenewLease request.
|
||||||
func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -796,8 +935,8 @@ func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifMo
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -832,22 +971,21 @@ func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.R
|
||||||
// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If
|
// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If
|
||||||
// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional.
|
// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional.
|
||||||
// Set the blob's content language. If specified, this property is stored with the blob and returned with a read
|
// Set the blob's content language. If specified, this property is stored with the blob and returned with a read
|
||||||
// request. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this
|
// request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// ID. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentDisposition is
|
||||||
// blobContentDisposition is optional. Sets the blob's Content-Disposition header. requestID is provides a
|
// optional. Sets the blob's Content-Disposition header. requestID is provides a client-generated, opaque value with a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
// analytics logging is enabled.
|
func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) {
|
||||||
func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) {
|
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentDisposition, requestID)
|
req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobContentDisposition, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -859,7 +997,7 @@ func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blo
|
||||||
}
|
}
|
||||||
|
|
||||||
// setHTTPHeadersPreparer prepares the SetHTTPHeaders request.
|
// setHTTPHeadersPreparer prepares the SetHTTPHeaders request.
|
||||||
func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -894,8 +1032,8 @@ func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -932,23 +1070,20 @@ func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeli
|
||||||
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
||||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||||
// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the
|
// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the
|
||||||
// container's lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a
|
// resource's lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) {
|
func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -960,7 +1095,7 @@ func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metada
|
||||||
}
|
}
|
||||||
|
|
||||||
// setMetadataPreparer prepares the SetMetadata request.
|
// setMetadataPreparer prepares the SetMetadata request.
|
||||||
func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -985,8 +1120,8 @@ func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -1018,15 +1153,16 @@ func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.
|
||||||
// information, see <a
|
// information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
|
||||||
func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string) (*BlobSetTierResponse, error) {
|
// specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
|
func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (*BlobSetTierResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.setTierPreparer(tier, timeout, requestID)
|
req, err := client.setTierPreparer(tier, timeout, requestID, leaseID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -1038,7 +1174,7 @@ func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeo
|
||||||
}
|
}
|
||||||
|
|
||||||
// setTierPreparer prepares the SetTier request.
|
// setTierPreparer prepares the SetTier request.
|
||||||
func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -1054,6 +1190,9 @@ func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, re
|
||||||
if requestID != nil {
|
if requestID != nil {
|
||||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
}
|
}
|
||||||
|
if leaseID != nil {
|
||||||
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
|
}
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1082,27 +1221,23 @@ func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Resp
|
||||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||||
// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
|
// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
|
||||||
// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
|
// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
|
||||||
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatches is
|
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
|
||||||
// specify an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to
|
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
|
||||||
// operate only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a
|
// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active
|
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// and matches this ID. sourceLeaseID is specify this header to perform the operation only if the lease ID given
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// matches the active lease ID of the source blob. requestID is provides a client-generated, opaque value with a 1 KB
|
// logs when storage analytics logging is enabled.
|
||||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
|
||||||
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
|
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, sourceLeaseID, requestID)
|
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -1114,7 +1249,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string
|
||||||
}
|
}
|
||||||
|
|
||||||
// startCopyFromURLPreparer prepares the StartCopyFromURL request.
|
// startCopyFromURLPreparer prepares the StartCopyFromURL request.
|
||||||
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (pipeline.Request, error) {
|
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -1135,8 +1270,8 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
|
||||||
if sourceIfUnmodifiedSince != nil {
|
if sourceIfUnmodifiedSince != nil {
|
||||||
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if sourceIfMatches != nil {
|
if sourceIfMatch != nil {
|
||||||
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatches))
|
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
|
||||||
}
|
}
|
||||||
if sourceIfNoneMatch != nil {
|
if sourceIfNoneMatch != nil {
|
||||||
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
||||||
|
@ -1147,8 +1282,8 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -1157,9 +1292,6 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
|
||||||
if leaseID != nil {
|
if leaseID != nil {
|
||||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
}
|
}
|
||||||
if sourceLeaseID != nil {
|
|
||||||
req.Header.Set("x-ms-source-lease-id", *sourceLeaseID)
|
|
||||||
}
|
|
||||||
req.Header.Set("x-ms-version", ServiceVersion)
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
if requestID != nil {
|
if requestID != nil {
|
||||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
|
@ -48,25 +48,22 @@ func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
|
||||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
|
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
// analytics logging is enabled.
|
// analytics logging is enabled.
|
||||||
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
|
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -78,7 +75,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL
|
||||||
}
|
}
|
||||||
|
|
||||||
// commitBlockListPreparer prepares the CommitBlockList request.
|
// commitBlockListPreparer prepares the CommitBlockList request.
|
||||||
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -121,8 +118,8 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -163,7 +160,7 @@ func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
||||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
|
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
|
||||||
|
@ -223,7 +220,7 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -240,13 +237,14 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
|
||||||
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
||||||
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
||||||
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
|
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
|
||||||
// successful return. Callers should ensure closure when receiving an error.timeout is the timeout parameter is
|
// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the
|
||||||
// expressed in seconds. For more information, see <a
|
// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
|
||||||
|
// seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
|
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: body,
|
{targetValue: body,
|
||||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||||
|
@ -255,7 +253,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.stageBlockPreparer(blockID, contentLength, body, timeout, leaseID, requestID)
|
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -267,7 +265,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
|
||||||
}
|
}
|
||||||
|
|
||||||
// stageBlockPreparer prepares the StageBlock request.
|
// stageBlockPreparer prepares the StageBlock request.
|
||||||
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -280,6 +278,9 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i
|
||||||
params.Set("comp", "block")
|
params.Set("comp", "block")
|
||||||
req.URL.RawQuery = params.Encode()
|
req.URL.RawQuery = params.Encode()
|
||||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
|
if transactionalContentMD5 != nil {
|
||||||
|
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||||
|
}
|
||||||
if leaseID != nil {
|
if leaseID != nil {
|
||||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
}
|
}
|
||||||
|
@ -306,22 +307,26 @@ func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipel
|
||||||
//
|
//
|
||||||
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
||||||
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
||||||
// same size for each block. contentLength is the length of the request. sourceURL is specifiy an URL to the copy
|
// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source.
|
||||||
// source. sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated
|
// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the
|
||||||
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
|
// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For
|
||||||
// seconds. For more information, see <a
|
// more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate
|
||||||
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
|
// only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to
|
||||||
|
// operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs
|
||||||
|
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||||
|
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
|
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, requestID)
|
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -333,7 +338,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str
|
||||||
}
|
}
|
||||||
|
|
||||||
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
|
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
|
||||||
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -346,9 +351,7 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
|
||||||
params.Set("comp", "block")
|
params.Set("comp", "block")
|
||||||
req.URL.RawQuery = params.Encode()
|
req.URL.RawQuery = params.Encode()
|
||||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
if sourceURL != nil {
|
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||||
req.Header.Set("x-ms-copy-source", *sourceURL)
|
|
||||||
}
|
|
||||||
if sourceRange != nil {
|
if sourceRange != nil {
|
||||||
req.Header.Set("x-ms-source-range", *sourceRange)
|
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||||
}
|
}
|
||||||
|
@ -358,6 +361,18 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
|
||||||
if leaseID != nil {
|
if leaseID != nil {
|
||||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
}
|
}
|
||||||
|
if sourceIfModifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
|
||||||
|
}
|
||||||
|
if sourceIfNoneMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
||||||
|
}
|
||||||
req.Header.Set("x-ms-version", ServiceVersion)
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
if requestID != nil {
|
if requestID != nil {
|
||||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
|
@ -397,27 +412,24 @@ func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response)
|
||||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
|
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
// analytics logging is enabled.
|
// analytics logging is enabled.
|
||||||
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
|
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: body,
|
{targetValue: body,
|
||||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -429,7 +441,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
|
||||||
}
|
}
|
||||||
|
|
||||||
// uploadPreparer prepares the Upload request.
|
// uploadPreparer prepares the Upload request.
|
||||||
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -472,8 +484,8 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
|
@ -10,7 +10,7 @@ import (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// ServiceVersion specifies the version of the operations used in this package.
|
// ServiceVersion specifies the version of the operations used in this package.
|
||||||
ServiceVersion = "2018-03-28"
|
ServiceVersion = "2018-11-09"
|
||||||
)
|
)
|
||||||
|
|
||||||
// managementClient is the base client for Azblob.
|
// managementClient is the base client for Azblob.
|
|
@ -7,14 +7,13 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// containerClient is the client for the Container methods of the Azblob service.
|
// containerClient is the client for the Container methods of the Azblob service.
|
||||||
|
@ -179,10 +178,10 @@ func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipel
|
||||||
// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be
|
// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be
|
||||||
// 15 to 60 seconds, or can be infinite
|
// 15 to 60 seconds, or can be infinite
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string
|
||||||
// proposedLeaseID is proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the
|
// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See
|
||||||
// proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string
|
// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in
|
||||||
// formats. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
|
@ -264,10 +263,7 @@ func (client containerClient) Create(ctx context.Context, timeout *int32, metada
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.createPreparer(timeout, metadata, access, requestID)
|
req, err := client.createPreparer(timeout, metadata, access, requestID)
|
||||||
|
@ -324,7 +320,7 @@ func (client containerClient) createResponder(resp pipeline.Response) (pipeline.
|
||||||
//
|
//
|
||||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque
|
// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque
|
||||||
|
@ -391,7 +387,7 @@ func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.
|
||||||
//
|
//
|
||||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) {
|
func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) {
|
||||||
|
@ -448,7 +444,7 @@ func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -460,12 +456,50 @@ func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetAccountInfo returns the sku name and account kind
|
||||||
|
func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
|
||||||
|
req, err := client.getAccountInfoPreparer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*ContainerGetAccountInfoResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||||
|
func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("restype", "account")
|
||||||
|
params.Set("comp", "properties")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||||
|
func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
||||||
|
|
||||||
// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned
|
// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned
|
||||||
// does not include the container's list of blobs
|
// does not include the container's list of blobs
|
||||||
//
|
//
|
||||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) {
|
func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) {
|
||||||
|
@ -601,7 +635,7 @@ func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Respons
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -699,7 +733,7 @@ func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Re
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -714,8 +748,8 @@ func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Re
|
||||||
// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be
|
// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be
|
||||||
// 15 to 60 seconds, or can be infinite
|
// 15 to 60 seconds, or can be infinite
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout
|
// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds.
|
||||||
// is the timeout parameter is expressed in seconds. For more information, see <a
|
// For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
|
@ -782,8 +816,8 @@ func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pip
|
||||||
// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
|
// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
|
||||||
// to 60 seconds, or can be infinite
|
// to 60 seconds, or can be infinite
|
||||||
//
|
//
|
||||||
// leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID. timeout
|
// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds.
|
||||||
// is the timeout parameter is expressed in seconds. For more information, see <a
|
// For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
|
@ -853,7 +887,7 @@ func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipel
|
||||||
// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more
|
// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more
|
||||||
// information, see <a
|
// information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and
|
// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and
|
||||||
// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified
|
// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified
|
||||||
// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
|
// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
|
||||||
|
@ -933,7 +967,7 @@ func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (
|
||||||
//
|
//
|
||||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with
|
// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with
|
||||||
// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to
|
// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to
|
||||||
// the destination blob. If one or more name-value pairs are specified, the destination blob is created with the
|
// the destination blob. If one or more name-value pairs are specified, the destination blob is created with the
|
||||||
|
@ -946,10 +980,7 @@ func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, l
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID)
|
req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID)
|
File diff suppressed because it is too large
Load Diff
|
@ -32,24 +32,24 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
|
||||||
// information, see <a
|
// information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||||
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
||||||
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
||||||
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
||||||
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
|
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -61,7 +61,7 @@ func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// clearPagesPreparer prepares the ClearPages request.
|
// clearPagesPreparer prepares the ClearPages request.
|
||||||
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -94,8 +94,8 @@ func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *in
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -129,28 +129,20 @@ func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeli
|
||||||
// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
|
// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
|
||||||
// expressed in seconds. For more information, see <a
|
// expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> metadata is optional. Specifies a user-defined name-value pair associated
|
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||||
// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or
|
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||||
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
|
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
|
||||||
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||||
// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
|
// in the analytics logs when storage analytics logging is enabled.
|
||||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) {
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
|
||||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
|
||||||
func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) {
|
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.copyIncrementalPreparer(copySource, timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -162,7 +154,7 @@ func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource str
|
||||||
}
|
}
|
||||||
|
|
||||||
// copyIncrementalPreparer prepares the CopyIncremental request.
|
// copyIncrementalPreparer prepares the CopyIncremental request.
|
||||||
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -173,19 +165,14 @@ func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout
|
||||||
}
|
}
|
||||||
params.Set("comp", "incrementalcopy")
|
params.Set("comp", "incrementalcopy")
|
||||||
req.URL.RawQuery = params.Encode()
|
req.URL.RawQuery = params.Encode()
|
||||||
if metadata != nil {
|
|
||||||
for k, v := range metadata {
|
|
||||||
req.Header.Set("x-ms-meta-"+k, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ifModifiedSince != nil {
|
if ifModifiedSince != nil {
|
||||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -211,8 +198,9 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
|
||||||
|
|
||||||
// Create the Create operation creates a new page blob.
|
// Create the Create operation creates a new page blob.
|
||||||
//
|
//
|
||||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page
|
||||||
// information, see <a
|
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
|
||||||
|
// expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||||
|
@ -226,28 +214,23 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
|
||||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
|
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentLength is this
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
|
||||||
// header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte
|
// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
|
||||||
// boundary. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
|
// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
|
||||||
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
|
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||||
// analytics logging is enabled.
|
|
||||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
{targetValue: metadata,
|
|
||||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
|
||||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentLength, blobSequenceNumber, requestID)
|
req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -259,7 +242,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, ti
|
||||||
}
|
}
|
||||||
|
|
||||||
// createPreparer prepares the Create request.
|
// createPreparer prepares the Create request.
|
||||||
func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -302,15 +285,13 @@ func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32,
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
}
|
}
|
||||||
if blobContentLength != nil {
|
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
|
||||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(*blobContentLength, 10))
|
|
||||||
}
|
|
||||||
if blobSequenceNumber != nil {
|
if blobSequenceNumber != nil {
|
||||||
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
|
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
|
||||||
}
|
}
|
||||||
|
@ -342,21 +323,21 @@ func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.R
|
||||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||||
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
// analytics logging is enabled.
|
// analytics logging is enabled.
|
||||||
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -368,7 +349,7 @@ func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string
|
||||||
}
|
}
|
||||||
|
|
||||||
// getPageRangesPreparer prepares the GetPageRanges request.
|
// getPageRangesPreparer prepares the GetPageRanges request.
|
||||||
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -394,8 +375,8 @@ func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *in
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -420,7 +401,7 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -432,8 +413,8 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPageRangesDiff [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
|
// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were
|
||||||
// that were changed between target blob and previous snapshot.
|
// changed between target blob and previous snapshot.
|
||||||
//
|
//
|
||||||
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
|
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
|
||||||
// retrieve. For more information on working with blob snapshots, see <a
|
// retrieve. For more information on working with blob snapshots, see <a
|
||||||
|
@ -445,21 +426,21 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip
|
||||||
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
|
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
|
||||||
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
|
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
|
||||||
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
|
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
|
||||||
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the container's lease is
|
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||||
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
|
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||||
// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs
|
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// logs when storage analytics logging is enabled.
|
// logs when storage analytics logging is enabled.
|
||||||
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -471,7 +452,7 @@ func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *st
|
||||||
}
|
}
|
||||||
|
|
||||||
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
|
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
|
||||||
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -500,8 +481,8 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -526,7 +507,7 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response)
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -544,21 +525,21 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response)
|
||||||
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
|
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
|
||||||
// see <a
|
// see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
|
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||||
// analytics logs when storage analytics logging is enabled.
|
// logs when storage analytics logging is enabled.
|
||||||
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
|
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -570,7 +551,7 @@ func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// resizePreparer prepares the Resize request.
|
// resizePreparer prepares the Resize request.
|
||||||
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -590,8 +571,8 @@ func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *in
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -621,23 +602,23 @@ func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.R
|
||||||
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout
|
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout
|
||||||
// is the timeout parameter is expressed in seconds. For more information, see <a
|
// is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||||
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
|
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||||
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
|
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||||
// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
|
// blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to
|
||||||
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
|
// track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
|
||||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
// analytics logging is enabled.
|
// analytics logging is enabled.
|
||||||
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) {
|
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: timeout,
|
{targetValue: timeout,
|
||||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobSequenceNumber, requestID)
|
req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -649,7 +630,7 @@ func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceN
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
|
// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
|
||||||
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -669,8 +650,8 @@ func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction S
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -700,21 +681,22 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
|
||||||
// UploadPages the Upload Pages operation writes a range of pages to a page blob
|
// UploadPages the Upload Pages operation writes a range of pages to a page blob
|
||||||
//
|
//
|
||||||
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
// error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
|
||||||
|
// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
|
||||||
// information, see <a
|
// information, see <a
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||||
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
|
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||||
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
||||||
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
||||||
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
||||||
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
||||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: body,
|
{targetValue: body,
|
||||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||||
|
@ -723,7 +705,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
req, err := client.uploadPagesPreparer(body, contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -735,7 +717,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||||
}
|
}
|
||||||
|
|
||||||
// uploadPagesPreparer prepares the UploadPages request.
|
// uploadPagesPreparer prepares the UploadPages request.
|
||||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return req, pipeline.NewError(err, "failed to create request")
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
@ -747,6 +729,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
|
||||||
params.Set("comp", "page")
|
params.Set("comp", "page")
|
||||||
req.URL.RawQuery = params.Encode()
|
req.URL.RawQuery = params.Encode()
|
||||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
|
if transactionalContentMD5 != nil {
|
||||||
|
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||||
|
}
|
||||||
if rangeParameter != nil {
|
if rangeParameter != nil {
|
||||||
req.Header.Set("x-ms-range", *rangeParameter)
|
req.Header.Set("x-ms-range", *rangeParameter)
|
||||||
}
|
}
|
||||||
|
@ -768,8 +753,8 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
|
||||||
if ifUnmodifiedSince != nil {
|
if ifUnmodifiedSince != nil {
|
||||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
}
|
}
|
||||||
if ifMatches != nil {
|
if ifMatch != nil {
|
||||||
req.Header.Set("If-Match", string(*ifMatches))
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
}
|
}
|
||||||
if ifNoneMatch != nil {
|
if ifNoneMatch != nil {
|
||||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
@ -792,3 +777,120 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel
|
||||||
resp.Response().Body.Close()
|
resp.Response().Body.Close()
|
||||||
return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
|
return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read
|
||||||
|
// from a URL
|
||||||
|
//
|
||||||
|
// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The
|
||||||
|
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
|
||||||
|
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
|
||||||
|
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
|
||||||
|
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
|
||||||
|
// seconds. For more information, see <a
|
||||||
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
|
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||||
|
// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
|
||||||
|
// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
|
||||||
|
// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
|
||||||
|
// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
|
||||||
|
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
|
||||||
|
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||||
|
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
|
||||||
|
// specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this
|
||||||
|
// header value to operate only on a blob if it has been modified since the specified date/time.
|
||||||
|
// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||||
|
// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||||
|
// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
|
||||||
|
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||||
|
// analytics logging is enabled.
|
||||||
|
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||||
|
if err := validate([]validation{
|
||||||
|
{targetValue: timeout,
|
||||||
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*PageBlobUploadPagesFromURLResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
|
||||||
|
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
if timeout != nil {
|
||||||
|
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||||
|
}
|
||||||
|
params.Set("comp", "page")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||||
|
req.Header.Set("x-ms-source-range", sourceRange)
|
||||||
|
if sourceContentMD5 != nil {
|
||||||
|
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||||
|
req.Header.Set("x-ms-range", rangeParameter)
|
||||||
|
if leaseID != nil {
|
||||||
|
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||||
|
}
|
||||||
|
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||||
|
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||||
|
}
|
||||||
|
if ifSequenceNumberLessThan != nil {
|
||||||
|
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
|
||||||
|
}
|
||||||
|
if ifSequenceNumberEqualTo != nil {
|
||||||
|
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
|
||||||
|
}
|
||||||
|
if ifModifiedSince != nil {
|
||||||
|
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if ifMatch != nil {
|
||||||
|
req.Header.Set("If-Match", string(*ifMatch))
|
||||||
|
}
|
||||||
|
if ifNoneMatch != nil {
|
||||||
|
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||||
|
}
|
||||||
|
if sourceIfModifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfUnmodifiedSince != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||||
|
}
|
||||||
|
if sourceIfMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
|
||||||
|
}
|
||||||
|
if sourceIfNoneMatch != nil {
|
||||||
|
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
if requestID != nil {
|
||||||
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
|
}
|
||||||
|
req.Header.Set("x-ms-page-write", "update")
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request.
|
||||||
|
func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
|
@ -55,7 +55,7 @@ func validateResponse(resp pipeline.Response, successStatusCodes ...int) error {
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return NewResponseError(err, resp.Response(), "failed to read response body")
|
return err
|
||||||
}
|
}
|
||||||
// the service code, description and details will be populated during unmarshalling
|
// the service code, description and details will be populated during unmarshalling
|
||||||
responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
|
responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
|
|
@ -25,6 +25,44 @@ func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
|
||||||
return serviceClient{newManagementClient(url, p)}
|
return serviceClient{newManagementClient(url, p)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetAccountInfo returns the sku name and account kind
|
||||||
|
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
|
||||||
|
req, err := client.getAccountInfoPreparer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*ServiceGetAccountInfoResponse), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||||
|
func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("restype", "account")
|
||||||
|
params.Set("comp", "properties")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||||
|
func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||||
|
resp.Response().Body.Close()
|
||||||
|
return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||||
|
}
|
||||||
|
|
||||||
// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
|
// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
|
||||||
// and CORS (Cross-Origin Resource Sharing) rules.
|
// and CORS (Cross-Origin Resource Sharing) rules.
|
||||||
//
|
//
|
||||||
|
@ -83,7 +121,7 @@ func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipe
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -153,7 +191,86 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
|
}
|
||||||
|
if len(b) > 0 {
|
||||||
|
b = removeBOM(b)
|
||||||
|
err = xml.Unmarshal(b, result)
|
||||||
|
if err != nil {
|
||||||
|
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUserDelegationKey retrieves a user delgation key for the Blob service. This is only a valid operation when using
|
||||||
|
// bearer token authentication.
|
||||||
|
//
|
||||||
|
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||||
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
|
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||||
|
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
|
func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) {
|
||||||
|
if err := validate([]validation{
|
||||||
|
{targetValue: timeout,
|
||||||
|
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||||
|
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.(*UserDelegationKey), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUserDelegationKeyPreparer prepares the GetUserDelegationKey request.
|
||||||
|
func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||||
|
req, err := pipeline.NewRequest("POST", client.url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to create request")
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
if timeout != nil {
|
||||||
|
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||||
|
}
|
||||||
|
params.Set("restype", "service")
|
||||||
|
params.Set("comp", "userdelegationkey")
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
req.Header.Set("x-ms-version", ServiceVersion)
|
||||||
|
if requestID != nil {
|
||||||
|
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||||
|
}
|
||||||
|
b, err := xml.Marshal(keyInfo)
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to marshal request body")
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/xml")
|
||||||
|
err = req.SetBody(bytes.NewReader(b))
|
||||||
|
if err != nil {
|
||||||
|
return req, pipeline.NewError(err, "failed to set request body")
|
||||||
|
}
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request.
|
||||||
|
func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||||
|
err := validateResponse(resp, http.StatusOK)
|
||||||
|
if resp == nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result := &UserDelegationKey{rawResponse: resp.Response()}
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
defer resp.Response().Body.Close()
|
||||||
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
||||||
|
@ -183,7 +300,7 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe
|
||||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||||
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||||
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersResponse, error) {
|
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
|
||||||
if err := validate([]validation{
|
if err := validate([]validation{
|
||||||
{targetValue: maxresults,
|
{targetValue: maxresults,
|
||||||
constraints: []constraint{{target: "maxresults", name: null, rule: false,
|
constraints: []constraint{{target: "maxresults", name: null, rule: false,
|
||||||
|
@ -201,7 +318,7 @@ func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *s
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return resp.(*ListContainersResponse), err
|
return resp.(*ListContainersSegmentResponse), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// listContainersSegmentPreparer prepares the ListContainersSegment request.
|
// listContainersSegmentPreparer prepares the ListContainersSegment request.
|
||||||
|
@ -241,14 +358,14 @@ func (client serviceClient) listContainersSegmentResponder(resp pipeline.Respons
|
||||||
if resp == nil {
|
if resp == nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
result := &ListContainersResponse{rawResponse: resp.Response()}
|
result := &ListContainersSegmentResponse{rawResponse: resp.Response()}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, err
|
return result, err
|
||||||
}
|
}
|
||||||
defer resp.Response().Body.Close()
|
defer resp.Response().Body.Close()
|
||||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return result, NewResponseError(err, resp.Response(), "failed to read response body")
|
return result, err
|
||||||
}
|
}
|
||||||
if len(b) > 0 {
|
if len(b) > 0 {
|
||||||
b = removeBOM(b)
|
b = removeBOM(b)
|
|
@ -5,7 +5,7 @@ package azblob
|
||||||
|
|
||||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||||
func UserAgent() string {
|
func UserAgent() string {
|
||||||
return "Azure-SDK-For-Go/0.0.0 azblob/2018-03-28"
|
return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Version returns the semantic version (see http://semver.org) of the client.
|
// Version returns the semantic version (see http://semver.org) of the client.
|
|
@ -65,7 +65,7 @@ func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
|
||||||
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
|
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
|
||||||
resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
|
resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
|
||||||
BlobAccessConditions{
|
BlobAccessConditions{
|
||||||
HTTPAccessConditions: HTTPAccessConditions{IfMatch: getInfo.ETag},
|
ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
|
||||||
},
|
},
|
||||||
false)
|
false)
|
||||||
if err != nil {
|
if err != nil {
|
|
@ -0,0 +1,29 @@
|
||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package colorable
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-isatty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewColorable return new instance of Writer which handle escape sequence.
|
||||||
|
func NewColorable(file *os.File) io.Writer {
|
||||||
|
if file == nil {
|
||||||
|
panic("nil passed instead of *os.File to NewColorable()")
|
||||||
|
}
|
||||||
|
|
||||||
|
return file
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
|
||||||
|
func NewColorableStdout() io.Writer {
|
||||||
|
return os.Stdout
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
|
||||||
|
func NewColorableStderr() io.Writer {
|
||||||
|
return os.Stderr
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetProxyFunc is a forwarder for the OS-Exclusive proxyMiddleman_os.go files
|
||||||
|
func GetProxyFunc() func(*http.Request) (*url.URL, error) {
|
||||||
|
return proxyMiddleman()
|
||||||
|
}
|
|
@ -0,0 +1,23 @@
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2014 mattn
|
||||||
|
Copyright (c) 2017 oliverpool
|
||||||
|
Copyright (c) 2019 Adele Reed
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
|
@ -0,0 +1,49 @@
|
||||||
|
# ieproxy
|
||||||
|
|
||||||
|
Go package to detect the proxy settings on Windows platform.
|
||||||
|
|
||||||
|
The settings are initially attempted to be read from the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), but falls back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) in the event the DLL call fails.
|
||||||
|
|
||||||
|
For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy)
|
||||||
|
|
||||||
|
## Methods
|
||||||
|
|
||||||
|
You can either obtain a `net/http` compatible proxy function using `ieproxy.GetProxyFunc()`, set environment variables using `ieproxy.OverrideEnvWithStaticProxy()` (though no automatic configuration is available this way), or obtain the proxy settings via `ieproxy.GetConf()`.
|
||||||
|
|
||||||
|
| Method | Supported configuration options: |
|
||||||
|
|----------------------------------------|-----------------------------------------------|
|
||||||
|
| `ieproxy.GetProxyFunc()` | Static, Specified script, and fully automatic |
|
||||||
|
| `ieproxy.OverrideEnvWithStaticProxy()` | Static |
|
||||||
|
| `ieproxy.GetConf()` | Depends on how you use it |
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Using GetProxyFunc():
|
||||||
|
|
||||||
|
```go
|
||||||
|
func init() {
|
||||||
|
http.DefaultTransport.(*http.Transport).Proxy = ieproxy.GetProxyFunc()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
GetProxyFunc acts as a middleman between `net/http` and `mattn/go-ieproxy` in order to select the correct proxy configuration based off the details supplied in the config.
|
||||||
|
|
||||||
|
### Using OverrideEnvWithStaticProxy():
|
||||||
|
|
||||||
|
```go
|
||||||
|
func init() {
|
||||||
|
ieproxy.OverrideEnvWithStaticProxy()
|
||||||
|
http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
OverrideEnvWithStaticProxy overrides the relevant environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`) with the **static, manually configured** proxy details typically found in the registry.
|
||||||
|
|
||||||
|
### Using GetConf():
|
||||||
|
|
||||||
|
```go
|
||||||
|
func main() {
|
||||||
|
conf := ieproxy.GetConf()
|
||||||
|
//Handle proxies how you want to.
|
||||||
|
}
|
||||||
|
```
|
|
@ -0,0 +1,51 @@
|
||||||
|
// Package ieproxy is a utility to retrieve the proxy parameters (especially of Internet Explorer on windows)
|
||||||
|
//
|
||||||
|
// On windows, it gathers the parameters from the registry (regedit), while it uses env variable on other platforms
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
// ProxyConf gathers the configuration for proxy
|
||||||
|
type ProxyConf struct {
|
||||||
|
Static StaticProxyConf // static configuration
|
||||||
|
Automatic ProxyScriptConf // script configuration
|
||||||
|
}
|
||||||
|
|
||||||
|
// StaticProxyConf contains the configuration for static proxy
|
||||||
|
type StaticProxyConf struct {
|
||||||
|
// Is the proxy active?
|
||||||
|
Active bool
|
||||||
|
// Proxy address for each scheme (http, https)
|
||||||
|
// "" (empty string) is the fallback proxy
|
||||||
|
Protocols map[string]string
|
||||||
|
// Addresses not to be browsed via the proxy (comma-separated, linux-like)
|
||||||
|
NoProxy string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyScriptConf contains the configuration for automatic proxy
|
||||||
|
type ProxyScriptConf struct {
|
||||||
|
// Is the proxy active?
|
||||||
|
Active bool
|
||||||
|
// PreConfiguredURL of the .pac file.
|
||||||
|
// If this is empty and Active is true, auto-configuration should be assumed.
|
||||||
|
PreConfiguredURL string
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConf retrieves the proxy configuration from the Windows Regedit.
// On non-Windows platforms the underlying getConf returns a zero ProxyConf.
func GetConf() ProxyConf {
	return getConf()
}
|
||||||
|
|
||||||
|
// OverrideEnvWithStaticProxy writes new values to the
|
||||||
|
// `http_proxy`, `https_proxy` and `no_proxy` environment variables.
|
||||||
|
// The values are taken from the Windows Regedit (should be called in `init()` function - see example)
|
||||||
|
func OverrideEnvWithStaticProxy() {
|
||||||
|
overrideEnvWithStaticProxy(GetConf(), os.Setenv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindProxyForURL computes the proxy for a given URL according to the pac file.
// On non-Windows platforms the underlying findProxyForURL always returns "".
func (psc *ProxyScriptConf) FindProxyForURL(URL string) string {
	return psc.findProxyForURL(URL)
}
|
||||||
|
|
||||||
|
// envSetter matches the signature of os.Setenv; overrideEnvWithStaticProxy
// takes one as a parameter so the environment writer can be substituted.
type envSetter func(string, string) error
|
|
@ -0,0 +1,10 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
// getConf is the non-Windows implementation of GetConf: there is no
// registry-backed configuration to read, so it returns a zero ProxyConf.
func getConf() ProxyConf {
	return ProxyConf{}
}
|
||||||
|
|
||||||
|
// overrideEnvWithStaticProxy is a no-op outside Windows: there is no
// registry configuration to copy into the environment.
func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) {
}
|
|
@ -0,0 +1,164 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows/registry"
|
||||||
|
)
|
||||||
|
|
||||||
|
type regeditValues struct {
|
||||||
|
ProxyServer string
|
||||||
|
ProxyOverride string
|
||||||
|
ProxyEnable uint64
|
||||||
|
AutoConfigURL string
|
||||||
|
}
|
||||||
|
|
||||||
|
var once sync.Once
|
||||||
|
var windowsProxyConf ProxyConf
|
||||||
|
|
||||||
|
// GetConf retrieves the proxy configuration from the Windows Regedit
|
||||||
|
func getConf() ProxyConf {
|
||||||
|
once.Do(writeConf)
|
||||||
|
return windowsProxyConf
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeConf() {
|
||||||
|
var (
|
||||||
|
cfg *tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
if cfg, err = getUserConfigFromWindowsSyscall(); err != nil {
|
||||||
|
regedit, _ := readRegedit() // If the syscall fails, backup to manual detection.
|
||||||
|
windowsProxyConf = parseRegedit(regedit)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer globalFreeWrapper(cfg.lpszProxy)
|
||||||
|
defer globalFreeWrapper(cfg.lpszProxyBypass)
|
||||||
|
defer globalFreeWrapper(cfg.lpszAutoConfigUrl)
|
||||||
|
|
||||||
|
windowsProxyConf = ProxyConf{
|
||||||
|
Static: StaticProxyConf{
|
||||||
|
Active: cfg.lpszProxy != nil,
|
||||||
|
},
|
||||||
|
Automatic: ProxyScriptConf{
|
||||||
|
Active: cfg.lpszAutoConfigUrl != nil || cfg.fAutoDetect,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if windowsProxyConf.Static.Active {
|
||||||
|
protocol := make(map[string]string)
|
||||||
|
for _, s := range strings.Split(StringFromUTF16Ptr(cfg.lpszProxy), ";") {
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pair := strings.SplitN(s, "=", 2)
|
||||||
|
if len(pair) > 1 {
|
||||||
|
protocol[pair[0]] = pair[1]
|
||||||
|
} else {
|
||||||
|
protocol[""] = pair[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
windowsProxyConf.Static.Protocols = protocol
|
||||||
|
if cfg.lpszProxyBypass != nil {
|
||||||
|
windowsProxyConf.Static.NoProxy = strings.Replace(StringFromUTF16Ptr(cfg.lpszProxyBypass), ";", ",", -1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if windowsProxyConf.Automatic.Active {
|
||||||
|
windowsProxyConf.Automatic.PreConfiguredURL = StringFromUTF16Ptr(cfg.lpszAutoConfigUrl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) {
|
||||||
|
handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0)
|
||||||
|
if handle == 0 {
|
||||||
|
return &tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG{}, err
|
||||||
|
}
|
||||||
|
defer winHttpCloseHandle.Call(handle)
|
||||||
|
|
||||||
|
config := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG)
|
||||||
|
|
||||||
|
ret, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(config)))
|
||||||
|
if ret > 0 {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return config, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// overrideEnvWithStaticProxy writes the statically configured proxy settings
// into the http_proxy, https_proxy and no_proxy environment variables via
// setenv. Nothing is written when the static configuration is inactive;
// error returns from setenv are ignored.
func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) {
	if conf.Static.Active {
		for _, scheme := range []string{"http", "https"} {
			// Fall back to the schemeless ("") entry when no
			// scheme-specific proxy is configured.
			url := mapFallback(scheme, "", conf.Static.Protocols)
			setenv(scheme+"_proxy", url)
		}
		if conf.Static.NoProxy != "" {
			setenv("no_proxy", conf.Static.NoProxy)
		}
	}
}
|
||||||
|
|
||||||
|
func parseRegedit(regedit regeditValues) ProxyConf {
|
||||||
|
protocol := make(map[string]string)
|
||||||
|
for _, s := range strings.Split(regedit.ProxyServer, ";") {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pair := strings.SplitN(s, "=", 2)
|
||||||
|
if len(pair) > 1 {
|
||||||
|
protocol[pair[0]] = pair[1]
|
||||||
|
} else {
|
||||||
|
protocol[""] = pair[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ProxyConf{
|
||||||
|
Static: StaticProxyConf{
|
||||||
|
Active: regedit.ProxyEnable > 0,
|
||||||
|
Protocols: protocol,
|
||||||
|
NoProxy: strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match linux style
|
||||||
|
},
|
||||||
|
Automatic: ProxyScriptConf{
|
||||||
|
Active: regedit.AutoConfigURL != "",
|
||||||
|
PreConfiguredURL: regedit.AutoConfigURL,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func readRegedit() (values regeditValues, err error) {
|
||||||
|
k, err := registry.OpenKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer k.Close()
|
||||||
|
|
||||||
|
values.ProxyServer, _, err = k.GetStringValue("ProxyServer")
|
||||||
|
if err != nil && err != registry.ErrNotExist {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride")
|
||||||
|
if err != nil && err != registry.ErrNotExist {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable")
|
||||||
|
if err != nil && err != registry.ErrNotExist {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL")
|
||||||
|
if err != nil && err != registry.ErrNotExist {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,15 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var kernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||||
|
var globalFree = kernel32.NewProc("GlobalFree")
|
||||||
|
|
||||||
|
func globalFreeWrapper(ptr *uint16) {
|
||||||
|
if ptr != nil {
|
||||||
|
_, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr)))
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,7 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
func (psc *ProxyScriptConf) findProxyForURL(URL string) string {
|
||||||
|
return ""
|
||||||
|
}
|
|
@ -0,0 +1,72 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (psc *ProxyScriptConf) findProxyForURL(URL string) string {
|
||||||
|
if !psc.Active {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL)
|
||||||
|
i := strings.Index(proxy, ";")
|
||||||
|
if i >= 0 {
|
||||||
|
return proxy[:i]
|
||||||
|
}
|
||||||
|
return proxy
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProxyForURL(pacfileURL, URL string) (string, error) {
|
||||||
|
pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
URLPtr, err := syscall.UTF16PtrFromString(URL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0)
|
||||||
|
if handle == 0 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer winHttpCloseHandle.Call(handle)
|
||||||
|
|
||||||
|
dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL
|
||||||
|
dwAutoDetectFlags := autoDetectFlag(0)
|
||||||
|
pfURLptr := pacfileURLPtr
|
||||||
|
|
||||||
|
if pacfileURL == "" {
|
||||||
|
dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT
|
||||||
|
dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP
|
||||||
|
pfURLptr = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
options := tWINHTTP_AUTOPROXY_OPTIONS{
|
||||||
|
dwFlags: dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6
|
||||||
|
dwAutoDetectFlags: dwAutoDetectFlags,
|
||||||
|
lpszAutoConfigUrl: pfURLptr,
|
||||||
|
lpvReserved: nil,
|
||||||
|
dwReserved: 0,
|
||||||
|
fAutoLogonIfChallenged: true, // may not be optimal https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx
|
||||||
|
} // lpszProxyBypass isn't used as this only executes in cases where there (may) be a pac file (autodetect can fail), where lpszProxyBypass couldn't be returned.
|
||||||
|
// in the case that autodetect fails and no pre-specified pacfile is present, no proxy is returned.
|
||||||
|
|
||||||
|
info := new(tWINHTTP_PROXY_INFO)
|
||||||
|
|
||||||
|
ret, _, err := winHttpGetProxyForURL.Call(
|
||||||
|
handle,
|
||||||
|
uintptr(unsafe.Pointer(URLPtr)),
|
||||||
|
uintptr(unsafe.Pointer(&options)),
|
||||||
|
uintptr(unsafe.Pointer(info)),
|
||||||
|
)
|
||||||
|
if ret > 0 {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
defer globalFreeWrapper(info.lpszProxyBypass)
|
||||||
|
defer globalFreeWrapper(info.lpszProxy)
|
||||||
|
return StringFromUTF16Ptr(info.lpszProxy), err
|
||||||
|
}
|
|
@ -0,0 +1,13 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) {
|
||||||
|
// Fallthrough to ProxyFromEnvironment on all other OSes.
|
||||||
|
return http.ProxyFromEnvironment
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"golang.org/x/net/http/httpproxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) {
|
||||||
|
// Get the proxy configuration
|
||||||
|
conf := GetConf()
|
||||||
|
envcfg := httpproxy.FromEnvironment()
|
||||||
|
|
||||||
|
if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" {
|
||||||
|
// If the user manually specifies environment variables, prefer those over the Windows config.
|
||||||
|
return http.ProxyFromEnvironment
|
||||||
|
} else if conf.Automatic.Active {
|
||||||
|
// If automatic proxy obtaining is specified
|
||||||
|
return func(req *http.Request) (i *url.URL, e error) {
|
||||||
|
host := conf.Automatic.FindProxyForURL(req.URL.String())
|
||||||
|
if host == "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return &url.URL{Host: host}, nil
|
||||||
|
}
|
||||||
|
} else if conf.Static.Active {
|
||||||
|
// If static proxy obtaining is specified
|
||||||
|
prox := httpproxy.Config{
|
||||||
|
HTTPSProxy: mapFallback("https", "", conf.Static.Protocols),
|
||||||
|
HTTPProxy: mapFallback("http", "", conf.Static.Protocols),
|
||||||
|
NoProxy: conf.Static.NoProxy,
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(req *http.Request) (i *url.URL, e error) {
|
||||||
|
return prox.ProxyFunc()(req.URL)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Final fallthrough case; use the environment variables.
|
||||||
|
return http.ProxyFromEnvironment
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapFallback returns the value stored under oKey in m; when oKey is
// absent it returns the value under fbKey instead (which is the zero
// value if neither key exists).
func mapFallback(oKey, fbKey string, m map[string]string) string {
	if v, ok := m[oKey]; ok {
		return v
	}
	return m[fbKey]
}
|
|
@ -0,0 +1,23 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf16"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StringFromUTF16Ptr converts a *uint16 C string to a Go String
|
||||||
|
func StringFromUTF16Ptr(s *uint16) string {
|
||||||
|
if s == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s))
|
||||||
|
|
||||||
|
// find the string length
|
||||||
|
sz := 0
|
||||||
|
for p[sz] != 0 {
|
||||||
|
sz++
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(utf16.Decode(p[:sz:sz]))
|
||||||
|
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
package ieproxy
|
||||||
|
|
||||||
|
import "golang.org/x/sys/windows"
|
||||||
|
|
||||||
|
var winHttp = windows.NewLazySystemDLL("winhttp.dll")
|
||||||
|
var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl")
|
||||||
|
var winHttpOpen = winHttp.NewProc("WinHttpOpen")
|
||||||
|
var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle")
|
||||||
|
var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser")
|
||||||
|
|
||||||
|
type tWINHTTP_AUTOPROXY_OPTIONS struct {
|
||||||
|
dwFlags autoProxyFlag
|
||||||
|
dwAutoDetectFlags autoDetectFlag
|
||||||
|
lpszAutoConfigUrl *uint16
|
||||||
|
lpvReserved *uint16
|
||||||
|
dwReserved uint32
|
||||||
|
fAutoLogonIfChallenged bool
|
||||||
|
}
|
||||||
|
type autoProxyFlag uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
fWINHTTP_AUTOPROXY_AUTO_DETECT = autoProxyFlag(0x00000001)
|
||||||
|
fWINHTTP_AUTOPROXY_CONFIG_URL = autoProxyFlag(0x00000002)
|
||||||
|
fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT = autoProxyFlag(0x00080000)
|
||||||
|
fWINHTTP_AUTOPROXY_NO_CACHE_SVC = autoProxyFlag(0x00100000)
|
||||||
|
fWINHTTP_AUTOPROXY_NO_DIRECTACCESS = autoProxyFlag(0x00040000)
|
||||||
|
fWINHTTP_AUTOPROXY_RUN_INPROCESS = autoProxyFlag(0x00010000)
|
||||||
|
fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000)
|
||||||
|
fWINHTTP_AUTOPROXY_SORT_RESULTS = autoProxyFlag(0x00400000)
|
||||||
|
)
|
||||||
|
|
||||||
|
type autoDetectFlag uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
fWINHTTP_AUTO_DETECT_TYPE_DHCP = autoDetectFlag(0x00000001)
|
||||||
|
fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002)
|
||||||
|
)
|
||||||
|
|
||||||
|
type tWINHTTP_PROXY_INFO struct {
|
||||||
|
dwAccessType uint32
|
||||||
|
lpszProxy *uint16
|
||||||
|
lpszProxyBypass *uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct {
|
||||||
|
fAutoDetect bool
|
||||||
|
lpszAutoConfigUrl *uint16
|
||||||
|
lpszProxy *uint16
|
||||||
|
lpszProxyBypass *uint16
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,370 @@
|
||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package httpproxy provides support for HTTP proxy determination
|
||||||
|
// based on environment variables, as provided by net/http's
|
||||||
|
// ProxyFromEnvironment function.
|
||||||
|
//
|
||||||
|
// The API is not subject to the Go 1 compatibility promise and may change at
|
||||||
|
// any time.
|
||||||
|
package httpproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/net/idna"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds configuration for HTTP proxy settings. See
|
||||||
|
// FromEnvironment for details.
|
||||||
|
type Config struct {
|
||||||
|
// HTTPProxy represents the value of the HTTP_PROXY or
|
||||||
|
// http_proxy environment variable. It will be used as the proxy
|
||||||
|
// URL for HTTP requests and HTTPS requests unless overridden by
|
||||||
|
// HTTPSProxy or NoProxy.
|
||||||
|
HTTPProxy string
|
||||||
|
|
||||||
|
// HTTPSProxy represents the HTTPS_PROXY or https_proxy
|
||||||
|
// environment variable. It will be used as the proxy URL for
|
||||||
|
// HTTPS requests unless overridden by NoProxy.
|
||||||
|
HTTPSProxy string
|
||||||
|
|
||||||
|
// NoProxy represents the NO_PROXY or no_proxy environment
|
||||||
|
// variable. It specifies a string that contains comma-separated values
|
||||||
|
// specifying hosts that should be excluded from proxying. Each value is
|
||||||
|
// represented by an IP address prefix (1.2.3.4), an IP address prefix in
|
||||||
|
// CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
|
||||||
|
// An IP address prefix and domain name can also include a literal port
|
||||||
|
// number (1.2.3.4:80).
|
||||||
|
// A domain name matches that name and all subdomains. A domain name with
|
||||||
|
// a leading "." matches subdomains only. For example "foo.com" matches
|
||||||
|
// "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
|
||||||
|
// A single asterisk (*) indicates that no proxying should be done.
|
||||||
|
// A best effort is made to parse the string and errors are
|
||||||
|
// ignored.
|
||||||
|
NoProxy string
|
||||||
|
|
||||||
|
// CGI holds whether the current process is running
|
||||||
|
// as a CGI handler (FromEnvironment infers this from the
|
||||||
|
// presence of a REQUEST_METHOD environment variable).
|
||||||
|
// When this is set, ProxyForURL will return an error
|
||||||
|
// when HTTPProxy applies, because a client could be
|
||||||
|
// setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
|
||||||
|
CGI bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// config holds the parsed configuration for HTTP proxy settings.
|
||||||
|
type config struct {
|
||||||
|
// Config represents the original configuration as defined above.
|
||||||
|
Config
|
||||||
|
|
||||||
|
// httpsProxy is the parsed URL of the HTTPSProxy if defined.
|
||||||
|
httpsProxy *url.URL
|
||||||
|
|
||||||
|
// httpProxy is the parsed URL of the HTTPProxy if defined.
|
||||||
|
httpProxy *url.URL
|
||||||
|
|
||||||
|
// ipMatchers represent all values in the NoProxy that are IP address
|
||||||
|
// prefixes or an IP address in CIDR notation.
|
||||||
|
ipMatchers []matcher
|
||||||
|
|
||||||
|
// domainMatchers represent all values in the NoProxy that are a domain
|
||||||
|
// name or hostname & domain name
|
||||||
|
domainMatchers []matcher
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromEnvironment returns a Config instance populated from the
|
||||||
|
// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
|
||||||
|
// lowercase versions thereof). HTTPS_PROXY takes precedence over
|
||||||
|
// HTTP_PROXY for https requests.
|
||||||
|
//
|
||||||
|
// The environment values may be either a complete URL or a
|
||||||
|
// "host[:port]", in which case the "http" scheme is assumed. An error
|
||||||
|
// is returned if the value is a different form.
|
||||||
|
func FromEnvironment() *Config {
|
||||||
|
return &Config{
|
||||||
|
HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"),
|
||||||
|
HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"),
|
||||||
|
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
|
||||||
|
CGI: os.Getenv("REQUEST_METHOD") != "",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEnvAny(names ...string) string {
|
||||||
|
for _, n := range names {
|
||||||
|
if val := os.Getenv(n); val != "" {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyFunc returns a function that determines the proxy URL to use for
|
||||||
|
// a given request URL. Changing the contents of cfg will not affect
|
||||||
|
// proxy functions created earlier.
|
||||||
|
//
|
||||||
|
// A nil URL and nil error are returned if no proxy is defined in the
|
||||||
|
// environment, or a proxy should not be used for the given request, as
|
||||||
|
// defined by NO_PROXY.
|
||||||
|
//
|
||||||
|
// As a special case, if req.URL.Host is "localhost" (with or without a
|
||||||
|
// port number), then a nil URL and nil error will be returned.
|
||||||
|
func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
|
||||||
|
// Preprocess the Config settings for more efficient evaluation.
|
||||||
|
cfg1 := &config{
|
||||||
|
Config: *cfg,
|
||||||
|
}
|
||||||
|
cfg1.init()
|
||||||
|
return cfg1.proxyForURL
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) {
|
||||||
|
var proxy *url.URL
|
||||||
|
if reqURL.Scheme == "https" {
|
||||||
|
proxy = cfg.httpsProxy
|
||||||
|
}
|
||||||
|
if proxy == nil {
|
||||||
|
proxy = cfg.httpProxy
|
||||||
|
if proxy != nil && cfg.CGI {
|
||||||
|
return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if proxy == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if !cfg.useProxy(canonicalAddr(reqURL)) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return proxy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseProxy interprets a proxy environment value. It accepts a complete
// URL with an http, https, or socks5 scheme, or a bare "host[:port]"
// which is assumed to be http. Empty input yields a nil URL and nil error.
func parseProxy(proxy string) (*url.URL, error) {
	if proxy == "" {
		return nil, nil
	}

	parsed, err := url.Parse(proxy)
	supported := err == nil &&
		(parsed.Scheme == "http" ||
			parsed.Scheme == "https" ||
			parsed.Scheme == "socks5")
	if !supported {
		// The value was bogus or used an unexpected scheme. Try again
		// with "http://" prepended; if that parses, use it. Otherwise
		// fall through and report the original parse failure (if any).
		if retried, retryErr := url.Parse("http://" + proxy); retryErr == nil {
			return retried, nil
		}
	}
	if err != nil {
		return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
	}
	return parsed, nil
}
|
||||||
|
|
||||||
|
// useProxy reports whether requests to addr should use a proxy,
|
||||||
|
// according to the NO_PROXY or no_proxy environment variable.
|
||||||
|
// addr is always a canonicalAddr with a host and port.
|
||||||
|
func (cfg *config) useProxy(addr string) bool {
|
||||||
|
if len(addr) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
host, port, err := net.SplitHostPort(addr)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if host == "localhost" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ip := net.ParseIP(host)
|
||||||
|
if ip != nil {
|
||||||
|
if ip.IsLoopback() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addr = strings.ToLower(strings.TrimSpace(host))
|
||||||
|
|
||||||
|
if ip != nil {
|
||||||
|
for _, m := range cfg.ipMatchers {
|
||||||
|
if m.match(addr, port, ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, m := range cfg.domainMatchers {
|
||||||
|
if m.match(addr, port, ip) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *config) init() {
|
||||||
|
if parsed, err := parseProxy(c.HTTPProxy); err == nil {
|
||||||
|
c.httpProxy = parsed
|
||||||
|
}
|
||||||
|
if parsed, err := parseProxy(c.HTTPSProxy); err == nil {
|
||||||
|
c.httpsProxy = parsed
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range strings.Split(c.NoProxy, ",") {
|
||||||
|
p = strings.ToLower(strings.TrimSpace(p))
|
||||||
|
if len(p) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if p == "*" {
|
||||||
|
c.ipMatchers = []matcher{allMatch{}}
|
||||||
|
c.domainMatchers = []matcher{allMatch{}}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv4/CIDR, IPv6/CIDR
|
||||||
|
if _, pnet, err := net.ParseCIDR(p); err == nil {
|
||||||
|
c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPv4:port, [IPv6]:port
|
||||||
|
phost, pport, err := net.SplitHostPort(p)
|
||||||
|
if err == nil {
|
||||||
|
if len(phost) == 0 {
|
||||||
|
// There is no host part, likely the entry is malformed; ignore.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if phost[0] == '[' && phost[len(phost)-1] == ']' {
|
||||||
|
phost = phost[1 : len(phost)-1]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
phost = p
|
||||||
|
}
|
||||||
|
// IPv4, IPv6
|
||||||
|
if pip := net.ParseIP(phost); pip != nil {
|
||||||
|
c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(phost) == 0 {
|
||||||
|
// There is no host part, likely the entry is malformed; ignore.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// domain.com or domain.com:80
|
||||||
|
// foo.com matches bar.foo.com
|
||||||
|
// .domain.com or .domain.com:port
|
||||||
|
// *.domain.com or *.domain.com:port
|
||||||
|
if strings.HasPrefix(phost, "*.") {
|
||||||
|
phost = phost[1:]
|
||||||
|
}
|
||||||
|
matchHost := false
|
||||||
|
if phost[0] != '.' {
|
||||||
|
matchHost = true
|
||||||
|
phost = "." + phost
|
||||||
|
}
|
||||||
|
c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var portMap = map[string]string{
|
||||||
|
"http": "80",
|
||||||
|
"https": "443",
|
||||||
|
"socks5": "1080",
|
||||||
|
}
|
||||||
|
|
||||||
|
// canonicalAddr returns url.Host but always with a ":port" suffix
|
||||||
|
func canonicalAddr(url *url.URL) string {
|
||||||
|
addr := url.Hostname()
|
||||||
|
if v, err := idnaASCII(addr); err == nil {
|
||||||
|
addr = v
|
||||||
|
}
|
||||||
|
port := url.Port()
|
||||||
|
if port == "" {
|
||||||
|
port = portMap[url.Scheme]
|
||||||
|
}
|
||||||
|
return net.JoinHostPort(addr, port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
// A ':' only counts as a port separator when it appears after the last ']',
// so bracketed IPv6 literals without a port are handled correctly.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
|
||||||
|
|
||||||
|
func idnaASCII(v string) (string, error) {
|
||||||
|
// TODO: Consider removing this check after verifying performance is okay.
|
||||||
|
// Right now punycode verification, length checks, context checks, and the
|
||||||
|
// permissible character tests are all omitted. It also prevents the ToASCII
|
||||||
|
// call from salvaging an invalid IDN, when possible. As a result it may be
|
||||||
|
// possible to have two IDNs that appear identical to the user where the
|
||||||
|
// ASCII-only version causes an error downstream whereas the non-ASCII
|
||||||
|
// version does not.
|
||||||
|
// Note that for correct ASCII IDNs ToASCII will only do considerably more
|
||||||
|
// work, but it will not cause an allocation.
|
||||||
|
if isASCII(v) {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
return idna.Lookup.ToASCII(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isASCII reports whether s consists entirely of single-byte (ASCII)
// characters.
func isASCII(s string) bool {
	for _, b := range []byte(s) {
		if b >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// matcher represents the matching rule for a given value in the NO_PROXY list
|
||||||
|
type matcher interface {
|
||||||
|
// match returns true if the host and optional port or ip and optional port
|
||||||
|
// are allowed
|
||||||
|
match(host, port string, ip net.IP) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// allMatch matches on all possible inputs
|
||||||
|
type allMatch struct{}
|
||||||
|
|
||||||
|
func (a allMatch) match(host, port string, ip net.IP) bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type cidrMatch struct {
|
||||||
|
cidr *net.IPNet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m cidrMatch) match(host, port string, ip net.IP) bool {
|
||||||
|
return m.cidr.Contains(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ipMatch struct {
|
||||||
|
ip net.IP
|
||||||
|
port string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m ipMatch) match(host, port string, ip net.IP) bool {
|
||||||
|
if m.ip.Equal(ip) {
|
||||||
|
return m.port == "" || m.port == port
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type domainMatch struct {
|
||||||
|
host string
|
||||||
|
port string
|
||||||
|
|
||||||
|
matchHost bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m domainMatch) match(host, port string, ip net.IP) bool {
|
||||||
|
if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) {
|
||||||
|
return m.port == "" || m.port == port
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
|
@ -0,0 +1,734 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.10
|
||||||
|
|
||||||
|
// Package idna implements IDNA2008 using the compatibility processing
|
||||||
|
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||||
|
// deal with the transition from IDNA2003.
|
||||||
|
//
|
||||||
|
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
||||||
|
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||||
|
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||||
|
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||||
|
// differences between these two standards.
|
||||||
|
package idna // import "golang.org/x/net/idna"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/secure/bidirule"
|
||||||
|
"golang.org/x/text/unicode/bidi"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||||
|
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||||
|
// evaluated string as lookup.
|
||||||
|
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||||
|
// Other strategies are also viable, though:
|
||||||
|
// Option 1) Return an empty string in case of error, but allow the user to
|
||||||
|
// specify explicitly which errors to ignore.
|
||||||
|
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||||
|
// string, otherwise return the empty string in case of error.
|
||||||
|
// Option 3) Option 1 and 2.
|
||||||
|
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||||
|
// needed, and document that the return string may not be empty in case of
|
||||||
|
// error in the future.
|
||||||
|
// I think Option 1 is best, but it is quite opinionated.
|
||||||
|
|
||||||
|
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||||
|
func ToASCII(s string) (string, error) {
|
||||||
|
return Punycode.process(s, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||||
|
func ToUnicode(s string) (string, error) {
|
||||||
|
return Punycode.process(s, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Option configures a Profile at creation time.
|
||||||
|
type Option func(*options)
|
||||||
|
|
||||||
|
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||||
|
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||||
|
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||||
|
// compatibility. It is used by most browsers when resolving domain names. This
|
||||||
|
// option is only meaningful if combined with MapForLookup.
|
||||||
|
func Transitional(transitional bool) Option {
|
||||||
|
return func(o *options) { o.transitional = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||||
|
// are longer than allowed by the RFC.
|
||||||
|
func VerifyDNSLength(verify bool) Option {
|
||||||
|
return func(o *options) { o.verifyDNSLength = verify }
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||||
|
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||||
|
//
|
||||||
|
// This is the behavior suggested by the UTS #46 and is adopted by some
|
||||||
|
// browsers.
|
||||||
|
func RemoveLeadingDots(remove bool) Option {
|
||||||
|
return func(o *options) { o.removeLeadingDots = remove }
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||||
|
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||||
|
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||||
|
func ValidateLabels(enable bool) Option {
|
||||||
|
return func(o *options) {
|
||||||
|
// Don't override existing mappings, but set one that at least checks
|
||||||
|
// normalization if it is not set.
|
||||||
|
if o.mapping == nil && enable {
|
||||||
|
o.mapping = normalize
|
||||||
|
}
|
||||||
|
o.trie = trie
|
||||||
|
o.validateLabels = enable
|
||||||
|
o.fromPuny = validateFromPunycode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||||
|
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||||
|
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
||||||
|
//
|
||||||
|
// This option is useful, for instance, for browsers that allow characters
|
||||||
|
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||||
|
// http://www.rfc-editor.org/std/std3.txt for more details This option
|
||||||
|
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
||||||
|
func StrictDomainName(use bool) Option {
|
||||||
|
return func(o *options) {
|
||||||
|
o.trie = trie
|
||||||
|
o.useSTD3Rules = use
|
||||||
|
o.fromPuny = validateFromPunycode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: the following options pull in tables. The tables should not be linked
|
||||||
|
// in as long as the options are not used.
|
||||||
|
|
||||||
|
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||||
|
// that relies on proper validation of labels should include this rule.
|
||||||
|
func BidiRule() Option {
|
||||||
|
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||||
|
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||||
|
func ValidateForRegistration() Option {
|
||||||
|
return func(o *options) {
|
||||||
|
o.mapping = validateRegistration
|
||||||
|
StrictDomainName(true)(o)
|
||||||
|
ValidateLabels(true)(o)
|
||||||
|
VerifyDNSLength(true)(o)
|
||||||
|
BidiRule()(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||||
|
// transformed for domain name lookup according to the requirements set out in
|
||||||
|
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||||
|
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||||
|
// to add this check.
|
||||||
|
//
|
||||||
|
// The mappings include normalization and mapping case, width and other
|
||||||
|
// compatibility mappings.
|
||||||
|
func MapForLookup() Option {
|
||||||
|
return func(o *options) {
|
||||||
|
o.mapping = validateAndMap
|
||||||
|
StrictDomainName(true)(o)
|
||||||
|
ValidateLabels(true)(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type options struct {
|
||||||
|
transitional bool
|
||||||
|
useSTD3Rules bool
|
||||||
|
validateLabels bool
|
||||||
|
verifyDNSLength bool
|
||||||
|
removeLeadingDots bool
|
||||||
|
|
||||||
|
trie *idnaTrie
|
||||||
|
|
||||||
|
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||||
|
fromPuny func(p *Profile, s string) error
|
||||||
|
|
||||||
|
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||||
|
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||||
|
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
||||||
|
|
||||||
|
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||||
|
// defined in RFC 5893.
|
||||||
|
bidirule func(s string) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Profile defines the configuration of an IDNA mapper.
|
||||||
|
type Profile struct {
|
||||||
|
options
|
||||||
|
}
|
||||||
|
|
||||||
|
func apply(o *options, opts []Option) {
|
||||||
|
for _, f := range opts {
|
||||||
|
f(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Profile.
|
||||||
|
//
|
||||||
|
// With no options, the returned Profile is the most permissive and equals the
|
||||||
|
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||||
|
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||||
|
// for lookup and registration purposes respectively, which can be tailored by
|
||||||
|
// adding more fine-grained options, where later options override earlier
|
||||||
|
// options.
|
||||||
|
func New(o ...Option) *Profile {
|
||||||
|
p := &Profile{}
|
||||||
|
apply(&p.options, o)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||||
|
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||||
|
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||||
|
// an error and a (partially) processed result.
|
||||||
|
func (p *Profile) ToASCII(s string) (string, error) {
|
||||||
|
return p.process(s, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||||
|
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||||
|
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||||
|
// an error and a (partially) processed result.
|
||||||
|
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||||
|
pp := *p
|
||||||
|
pp.transitional = false
|
||||||
|
return pp.process(s, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String reports a string with a description of the profile for debugging
|
||||||
|
// purposes. The string format may change with different versions.
|
||||||
|
func (p *Profile) String() string {
|
||||||
|
s := ""
|
||||||
|
if p.transitional {
|
||||||
|
s = "Transitional"
|
||||||
|
} else {
|
||||||
|
s = "NonTransitional"
|
||||||
|
}
|
||||||
|
if p.useSTD3Rules {
|
||||||
|
s += ":UseSTD3Rules"
|
||||||
|
}
|
||||||
|
if p.validateLabels {
|
||||||
|
s += ":ValidateLabels"
|
||||||
|
}
|
||||||
|
if p.verifyDNSLength {
|
||||||
|
s += ":VerifyDNSLength"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||||
|
// of validation.
|
||||||
|
Punycode *Profile = punycode
|
||||||
|
|
||||||
|
// Lookup is the recommended profile for looking up domain names, according
|
||||||
|
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||||
|
// change over time.
|
||||||
|
Lookup *Profile = lookup
|
||||||
|
|
||||||
|
// Display is the recommended profile for displaying domain names.
|
||||||
|
// The configuration of this profile may change over time.
|
||||||
|
Display *Profile = display
|
||||||
|
|
||||||
|
// Registration is the recommended profile for checking whether a given
|
||||||
|
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||||
|
Registration *Profile = registration
|
||||||
|
|
||||||
|
punycode = &Profile{}
|
||||||
|
lookup = &Profile{options{
|
||||||
|
transitional: true,
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateAndMap,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
display = &Profile{options{
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateAndMap,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
registration = &Profile{options{
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
verifyDNSLength: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateRegistration,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
|
||||||
|
// TODO: profiles
|
||||||
|
// Register: recommended for approving domain names: don't do any mappings
|
||||||
|
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||||
|
)
|
||||||
|
|
||||||
|
type labelError struct{ label, code_ string }
|
||||||
|
|
||||||
|
func (e labelError) code() string { return e.code_ }
|
||||||
|
func (e labelError) Error() string {
|
||||||
|
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||||
|
}
|
||||||
|
|
||||||
|
type runeError rune
|
||||||
|
|
||||||
|
func (e runeError) code() string { return "P1" }
|
||||||
|
func (e runeError) Error() string {
|
||||||
|
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// process implements the algorithm described in section 4 of UTS #46,
|
||||||
|
// see https://www.unicode.org/reports/tr46.
|
||||||
|
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||||
|
var err error
|
||||||
|
var isBidi bool
|
||||||
|
if p.mapping != nil {
|
||||||
|
s, isBidi, err = p.mapping(p, s)
|
||||||
|
}
|
||||||
|
// Remove leading empty labels.
|
||||||
|
if p.removeLeadingDots {
|
||||||
|
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// TODO: allow for a quick check of the tables data.
|
||||||
|
// It seems like we should only create this error on ToASCII, but the
|
||||||
|
// UTS 46 conformance tests suggests we should always check this.
|
||||||
|
if err == nil && p.verifyDNSLength && s == "" {
|
||||||
|
err = &labelError{s, "A4"}
|
||||||
|
}
|
||||||
|
labels := labelIter{orig: s}
|
||||||
|
for ; !labels.done(); labels.next() {
|
||||||
|
label := labels.label()
|
||||||
|
if label == "" {
|
||||||
|
// Empty labels are not okay. The label iterator skips the last
|
||||||
|
// label if it is empty.
|
||||||
|
if err == nil && p.verifyDNSLength {
|
||||||
|
err = &labelError{s, "A4"}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(label, acePrefix) {
|
||||||
|
u, err2 := decode(label[len(acePrefix):])
|
||||||
|
if err2 != nil {
|
||||||
|
if err == nil {
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
// Spec says keep the old label.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
|
||||||
|
labels.set(u)
|
||||||
|
if err == nil && p.validateLabels {
|
||||||
|
err = p.fromPuny(p, u)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// This should be called on NonTransitional, according to the
|
||||||
|
// spec, but that currently does not have any effect. Use the
|
||||||
|
// original profile to preserve options.
|
||||||
|
err = p.validateLabel(u)
|
||||||
|
}
|
||||||
|
} else if err == nil {
|
||||||
|
err = p.validateLabel(label)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if isBidi && p.bidirule != nil && err == nil {
|
||||||
|
for labels.reset(); !labels.done(); labels.next() {
|
||||||
|
if !p.bidirule(labels.label()) {
|
||||||
|
err = &labelError{s, "B"}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if toASCII {
|
||||||
|
for labels.reset(); !labels.done(); labels.next() {
|
||||||
|
label := labels.label()
|
||||||
|
if !ascii(label) {
|
||||||
|
a, err2 := encode(acePrefix, label)
|
||||||
|
if err == nil {
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
label = a
|
||||||
|
labels.set(a)
|
||||||
|
}
|
||||||
|
n := len(label)
|
||||||
|
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||||
|
err = &labelError{label, "A4"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s = labels.result()
|
||||||
|
if toASCII && p.verifyDNSLength && err == nil {
|
||||||
|
// Compute the length of the domain name minus the root label and its dot.
|
||||||
|
n := len(s)
|
||||||
|
if n > 0 && s[n-1] == '.' {
|
||||||
|
n--
|
||||||
|
}
|
||||||
|
if len(s) < 1 || n > 253 {
|
||||||
|
err = &labelError{s, "A4"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
|
||||||
|
// TODO: consider first doing a quick check to see if any of these checks
|
||||||
|
// need to be done. This will make it slower in the general case, but
|
||||||
|
// faster in the common case.
|
||||||
|
mapped = norm.NFC.String(s)
|
||||||
|
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
|
||||||
|
return mapped, isBidi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
|
||||||
|
// TODO: filter need for normalization in loop below.
|
||||||
|
if !norm.NFC.IsNormalString(s) {
|
||||||
|
return s, false, &labelError{s, "V1"}
|
||||||
|
}
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
v, sz := trie.lookupString(s[i:])
|
||||||
|
if sz == 0 {
|
||||||
|
return s, bidi, runeError(utf8.RuneError)
|
||||||
|
}
|
||||||
|
bidi = bidi || info(v).isBidi(s[i:])
|
||||||
|
// Copy bytes not copied so far.
|
||||||
|
switch p.simplify(info(v).category()) {
|
||||||
|
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||||
|
// for strict conformance to IDNA2008.
|
||||||
|
case valid, deviation:
|
||||||
|
case disallowed, mapped, unknown, ignored:
|
||||||
|
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||||
|
return s, bidi, runeError(r)
|
||||||
|
}
|
||||||
|
i += sz
|
||||||
|
}
|
||||||
|
return s, bidi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c info) isBidi(s string) bool {
|
||||||
|
if !c.isMapped() {
|
||||||
|
return c&attributesMask == rtl
|
||||||
|
}
|
||||||
|
// TODO: also store bidi info for mapped data. This is possible, but a bit
|
||||||
|
// cumbersome and not for the common case.
|
||||||
|
p, _ := bidi.LookupString(s)
|
||||||
|
switch p.Class() {
|
||||||
|
case bidi.R, bidi.AL, bidi.AN:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
|
||||||
|
var (
|
||||||
|
b []byte
|
||||||
|
k int
|
||||||
|
)
|
||||||
|
// combinedInfoBits contains the or-ed bits of all runes. We use this
|
||||||
|
// to derive the mayNeedNorm bit later. This may trigger normalization
|
||||||
|
// overeagerly, but it will not do so in the common case. The end result
|
||||||
|
// is another 10% saving on BenchmarkProfile for the common case.
|
||||||
|
var combinedInfoBits info
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
v, sz := trie.lookupString(s[i:])
|
||||||
|
if sz == 0 {
|
||||||
|
b = append(b, s[k:i]...)
|
||||||
|
b = append(b, "\ufffd"...)
|
||||||
|
k = len(s)
|
||||||
|
if err == nil {
|
||||||
|
err = runeError(utf8.RuneError)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
combinedInfoBits |= info(v)
|
||||||
|
bidi = bidi || info(v).isBidi(s[i:])
|
||||||
|
start := i
|
||||||
|
i += sz
|
||||||
|
// Copy bytes not copied so far.
|
||||||
|
switch p.simplify(info(v).category()) {
|
||||||
|
case valid:
|
||||||
|
continue
|
||||||
|
case disallowed:
|
||||||
|
if err == nil {
|
||||||
|
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||||
|
err = runeError(r)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case mapped, deviation:
|
||||||
|
b = append(b, s[k:start]...)
|
||||||
|
b = info(v).appendMapping(b, s[start:i])
|
||||||
|
case ignored:
|
||||||
|
b = append(b, s[k:start]...)
|
||||||
|
// drop the rune
|
||||||
|
case unknown:
|
||||||
|
b = append(b, s[k:start]...)
|
||||||
|
b = append(b, "\ufffd"...)
|
||||||
|
}
|
||||||
|
k = i
|
||||||
|
}
|
||||||
|
if k == 0 {
|
||||||
|
// No changes so far.
|
||||||
|
if combinedInfoBits&mayNeedNorm != 0 {
|
||||||
|
s = norm.NFC.String(s)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
b = append(b, s[k:]...)
|
||||||
|
if norm.NFC.QuickSpan(b) != len(b) {
|
||||||
|
b = norm.NFC.Bytes(b)
|
||||||
|
}
|
||||||
|
// TODO: the punycode converters require strings as input.
|
||||||
|
s = string(b)
|
||||||
|
}
|
||||||
|
return s, bidi, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// A labelIter allows iterating over domain name labels.
|
||||||
|
type labelIter struct {
|
||||||
|
orig string
|
||||||
|
slice []string
|
||||||
|
curStart int
|
||||||
|
curEnd int
|
||||||
|
i int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) reset() {
|
||||||
|
l.curStart = 0
|
||||||
|
l.curEnd = 0
|
||||||
|
l.i = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) done() bool {
|
||||||
|
return l.curStart >= len(l.orig)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) result() string {
|
||||||
|
if l.slice != nil {
|
||||||
|
return strings.Join(l.slice, ".")
|
||||||
|
}
|
||||||
|
return l.orig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) label() string {
|
||||||
|
if l.slice != nil {
|
||||||
|
return l.slice[l.i]
|
||||||
|
}
|
||||||
|
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||||
|
l.curEnd = l.curStart + p
|
||||||
|
if p == -1 {
|
||||||
|
l.curEnd = len(l.orig)
|
||||||
|
}
|
||||||
|
return l.orig[l.curStart:l.curEnd]
|
||||||
|
}
|
||||||
|
|
||||||
|
// next sets the value to the next label. It skips the last label if it is empty.
|
||||||
|
func (l *labelIter) next() {
|
||||||
|
l.i++
|
||||||
|
if l.slice != nil {
|
||||||
|
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||||
|
l.curStart = len(l.orig)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
l.curStart = l.curEnd + 1
|
||||||
|
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||||
|
l.curStart = len(l.orig)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) set(s string) {
|
||||||
|
if l.slice == nil {
|
||||||
|
l.slice = strings.Split(l.orig, ".")
|
||||||
|
}
|
||||||
|
l.slice[l.i] = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||||
|
const acePrefix = "xn--"
|
||||||
|
|
||||||
|
func (p *Profile) simplify(cat category) category {
|
||||||
|
switch cat {
|
||||||
|
case disallowedSTD3Mapped:
|
||||||
|
if p.useSTD3Rules {
|
||||||
|
cat = disallowed
|
||||||
|
} else {
|
||||||
|
cat = mapped
|
||||||
|
}
|
||||||
|
case disallowedSTD3Valid:
|
||||||
|
if p.useSTD3Rules {
|
||||||
|
cat = disallowed
|
||||||
|
} else {
|
||||||
|
cat = valid
|
||||||
|
}
|
||||||
|
case deviation:
|
||||||
|
if !p.transitional {
|
||||||
|
cat = valid
|
||||||
|
}
|
||||||
|
case validNV8, validXV8:
|
||||||
|
// TODO: handle V2008
|
||||||
|
cat = valid
|
||||||
|
}
|
||||||
|
return cat
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateFromPunycode(p *Profile, s string) error {
|
||||||
|
if !norm.NFC.IsNormalString(s) {
|
||||||
|
return &labelError{s, "V1"}
|
||||||
|
}
|
||||||
|
// TODO: detect whether string may have to be normalized in the following
|
||||||
|
// loop.
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
v, sz := trie.lookupString(s[i:])
|
||||||
|
if sz == 0 {
|
||||||
|
return runeError(utf8.RuneError)
|
||||||
|
}
|
||||||
|
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||||
|
return &labelError{s, "V6"}
|
||||||
|
}
|
||||||
|
i += sz
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
zwnj = "\u200c"
|
||||||
|
zwj = "\u200d"
|
||||||
|
)
|
||||||
|
|
||||||
|
type joinState int8
|
||||||
|
|
||||||
|
const (
|
||||||
|
stateStart joinState = iota
|
||||||
|
stateVirama
|
||||||
|
stateBefore
|
||||||
|
stateBeforeVirama
|
||||||
|
stateAfter
|
||||||
|
stateFAIL
|
||||||
|
)
|
||||||
|
|
||||||
|
var joinStates = [][numJoinTypes]joinState{
|
||||||
|
stateStart: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateVirama,
|
||||||
|
},
|
||||||
|
stateVirama: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
},
|
||||||
|
stateBefore: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateBefore,
|
||||||
|
joinZWNJ: stateAfter,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateBeforeVirama,
|
||||||
|
},
|
||||||
|
stateBeforeVirama: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateBefore,
|
||||||
|
},
|
||||||
|
stateAfter: {
|
||||||
|
joiningL: stateFAIL,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateAfter,
|
||||||
|
joiningR: stateStart,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||||
|
},
|
||||||
|
stateFAIL: {
|
||||||
|
0: stateFAIL,
|
||||||
|
joiningL: stateFAIL,
|
||||||
|
joiningD: stateFAIL,
|
||||||
|
joiningT: stateFAIL,
|
||||||
|
joiningR: stateFAIL,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateFAIL,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
|
||||||
|
// already implicitly satisfied by the overall implementation.
|
||||||
|
func (p *Profile) validateLabel(s string) (err error) {
|
||||||
|
if s == "" {
|
||||||
|
if p.verifyDNSLength {
|
||||||
|
return &labelError{s, "A4"}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !p.validateLabels {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
trie := p.trie // p.validateLabels is only set if trie is set.
|
||||||
|
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||||
|
return &labelError{s, "V2"}
|
||||||
|
}
|
||||||
|
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||||
|
return &labelError{s, "V3"}
|
||||||
|
}
|
||||||
|
// TODO: merge the use of this in the trie.
|
||||||
|
v, sz := trie.lookupString(s)
|
||||||
|
x := info(v)
|
||||||
|
if x.isModifier() {
|
||||||
|
return &labelError{s, "V5"}
|
||||||
|
}
|
||||||
|
// Quickly return in the absence of zero-width (non) joiners.
|
||||||
|
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
st := stateStart
|
||||||
|
for i := 0; ; {
|
||||||
|
jt := x.joinType()
|
||||||
|
if s[i:i+sz] == zwj {
|
||||||
|
jt = joinZWJ
|
||||||
|
} else if s[i:i+sz] == zwnj {
|
||||||
|
jt = joinZWNJ
|
||||||
|
}
|
||||||
|
st = joinStates[st][jt]
|
||||||
|
if x.isViramaModifier() {
|
||||||
|
st = joinStates[st][joinVirama]
|
||||||
|
}
|
||||||
|
if i += sz; i == len(s) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v, sz = trie.lookupString(s[i:])
|
||||||
|
x = info(v)
|
||||||
|
}
|
||||||
|
if st == stateFAIL || st == stateAfter {
|
||||||
|
return &labelError{s, "C"}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ascii(s string) bool {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
if s[i] >= utf8.RuneSelf {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
|
@ -0,0 +1,682 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.10
|
||||||
|
|
||||||
|
// Package idna implements IDNA2008 using the compatibility processing
|
||||||
|
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||||
|
// deal with the transition from IDNA2003.
|
||||||
|
//
|
||||||
|
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
||||||
|
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||||
|
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||||
|
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||||
|
// differences between these two standards.
|
||||||
|
package idna // import "golang.org/x/net/idna"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/secure/bidirule"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||||
|
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||||
|
// evaluated string as lookup.
|
||||||
|
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||||
|
// Other strategies are also viable, though:
|
||||||
|
// Option 1) Return an empty string in case of error, but allow the user to
|
||||||
|
// specify explicitly which errors to ignore.
|
||||||
|
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||||
|
// string, otherwise return the empty string in case of error.
|
||||||
|
// Option 3) Option 1 and 2.
|
||||||
|
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||||
|
// needed, and document that the return string may not be empty in case of
|
||||||
|
// error in the future.
|
||||||
|
// I think Option 1 is best, but it is quite opinionated.
|
||||||
|
|
||||||
|
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||||
|
func ToASCII(s string) (string, error) {
|
||||||
|
return Punycode.process(s, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||||
|
func ToUnicode(s string) (string, error) {
|
||||||
|
return Punycode.process(s, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Option configures a Profile at creation time.
|
||||||
|
type Option func(*options)
|
||||||
|
|
||||||
|
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||||
|
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||||
|
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||||
|
// compatibility. It is used by most browsers when resolving domain names. This
|
||||||
|
// option is only meaningful if combined with MapForLookup.
|
||||||
|
func Transitional(transitional bool) Option {
|
||||||
|
return func(o *options) { o.transitional = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||||
|
// are longer than allowed by the RFC.
|
||||||
|
func VerifyDNSLength(verify bool) Option {
|
||||||
|
return func(o *options) { o.verifyDNSLength = verify }
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||||
|
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||||
|
//
|
||||||
|
// This is the behavior suggested by the UTS #46 and is adopted by some
|
||||||
|
// browsers.
|
||||||
|
func RemoveLeadingDots(remove bool) Option {
|
||||||
|
return func(o *options) { o.removeLeadingDots = remove }
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
//
// Enabling this links in the IDNA trie table and the punycode validator.
func ValidateLabels(enable bool) Option {
	return func(o *options) {
		// Don't override existing mappings, but set one that at least checks
		// normalization if it is not set.
		if o.mapping == nil && enable {
			o.mapping = normalize
		}
		o.trie = trie
		o.validateLabels = enable
		o.fromPuny = validateFromPunycode
	}
}
|
||||||
|
|
||||||
|
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
	return func(o *options) {
		// The trie and punycode validator are needed to evaluate the
		// STD3 categories, so wire them in unconditionally.
		o.trie = trie
		o.useSTD3Rules = use
		o.fromPuny = validateFromPunycode
	}
}
|
||||||
|
|
||||||
|
// NOTE: the following options pull in tables. The tables should not be linked
|
||||||
|
// in as long as the options are not used.
|
||||||
|
|
||||||
|
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
//
// Note: enabling this option links in the bidirule tables.
func BidiRule() Option {
	return func(o *options) { o.bidirule = bidirule.ValidString }
}
|
||||||
|
|
||||||
|
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
//
// It is a bundle of StrictDomainName, ValidateLabels, VerifyDNSLength and
// BidiRule on top of the registration mapping.
func ValidateForRegistration() Option {
	return func(o *options) {
		o.mapping = validateRegistration
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
		VerifyDNSLength(true)(o)
		BidiRule()(o)
	}
}
|
||||||
|
|
||||||
|
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
	return func(o *options) {
		o.mapping = validateAndMap
		StrictDomainName(true)(o)
		ValidateLabels(true)(o)
		RemoveLeadingDots(true)(o)
	}
}
|
||||||
|
|
||||||
|
type options struct {
|
||||||
|
transitional bool
|
||||||
|
useSTD3Rules bool
|
||||||
|
validateLabels bool
|
||||||
|
verifyDNSLength bool
|
||||||
|
removeLeadingDots bool
|
||||||
|
|
||||||
|
trie *idnaTrie
|
||||||
|
|
||||||
|
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||||
|
fromPuny func(p *Profile, s string) error
|
||||||
|
|
||||||
|
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||||
|
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||||
|
mapping func(p *Profile, s string) (string, error)
|
||||||
|
|
||||||
|
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||||
|
// defined in RFC 5893.
|
||||||
|
bidirule func(s string) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Profile defines the configuration of a IDNA mapper.
|
||||||
|
type Profile struct {
|
||||||
|
options
|
||||||
|
}
|
||||||
|
|
||||||
|
func apply(o *options, opts []Option) {
|
||||||
|
for _, f := range opts {
|
||||||
|
f(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Profile.
|
||||||
|
//
|
||||||
|
// With no options, the returned Profile is the most permissive and equals the
|
||||||
|
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||||
|
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||||
|
// for lookup and registration purposes respectively, which can be tailored by
|
||||||
|
// adding more fine-grained options, where later options override earlier
|
||||||
|
// options.
|
||||||
|
func New(o ...Option) *Profile {
|
||||||
|
p := &Profile{}
|
||||||
|
apply(&p.options, o)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToASCII(s string) (string, error) {
	return p.process(s, true)
}
|
||||||
|
|
||||||
|
// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToUnicode(s string) (string, error) {
	// UTS #46 specifies that ToUnicode never uses transitional mapping,
	// so process a copy of the profile with it disabled.
	pp := *p
	pp.transitional = false
	return pp.process(s, false)
}
|
||||||
|
|
||||||
|
// String reports a string with a description of the profile for debugging
|
||||||
|
// purposes. The string format may change with different versions.
|
||||||
|
func (p *Profile) String() string {
|
||||||
|
s := ""
|
||||||
|
if p.transitional {
|
||||||
|
s = "Transitional"
|
||||||
|
} else {
|
||||||
|
s = "NonTransitional"
|
||||||
|
}
|
||||||
|
if p.useSTD3Rules {
|
||||||
|
s += ":UseSTD3Rules"
|
||||||
|
}
|
||||||
|
if p.validateLabels {
|
||||||
|
s += ":ValidateLabels"
|
||||||
|
}
|
||||||
|
if p.verifyDNSLength {
|
||||||
|
s += ":VerifyDNSLength"
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||||
|
// of validation.
|
||||||
|
Punycode *Profile = punycode
|
||||||
|
|
||||||
|
// Lookup is the recommended profile for looking up domain names, according
|
||||||
|
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||||
|
// change over time.
|
||||||
|
Lookup *Profile = lookup
|
||||||
|
|
||||||
|
// Display is the recommended profile for displaying domain names.
|
||||||
|
// The configuration of this profile may change over time.
|
||||||
|
Display *Profile = display
|
||||||
|
|
||||||
|
// Registration is the recommended profile for checking whether a given
|
||||||
|
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||||
|
Registration *Profile = registration
|
||||||
|
|
||||||
|
punycode = &Profile{}
|
||||||
|
lookup = &Profile{options{
|
||||||
|
transitional: true,
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
removeLeadingDots: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateAndMap,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
display = &Profile{options{
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
removeLeadingDots: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateAndMap,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
registration = &Profile{options{
|
||||||
|
useSTD3Rules: true,
|
||||||
|
validateLabels: true,
|
||||||
|
verifyDNSLength: true,
|
||||||
|
trie: trie,
|
||||||
|
fromPuny: validateFromPunycode,
|
||||||
|
mapping: validateRegistration,
|
||||||
|
bidirule: bidirule.ValidString,
|
||||||
|
}}
|
||||||
|
|
||||||
|
// TODO: profiles
|
||||||
|
// Register: recommended for approving domain names: don't do any mappings
|
||||||
|
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||||
|
)
|
||||||
|
|
||||||
|
// labelError is an error for a specific label, tagged with a UTS #46 status
// code (e.g. "A4" for length violations, "V1" for normalization failures).
type labelError struct{ label, code_ string }

// code returns the UTS #46 status code associated with the error.
func (e labelError) code() string { return e.code_ }

// Error implements the error interface.
func (e labelError) Error() string {
	return fmt.Sprintf("idna: invalid label %q", e.label)
}
|
||||||
|
|
||||||
|
// runeError is an error caused by a single disallowed rune; the value is the
// offending rune itself.
type runeError rune

// code returns the UTS #46 status code "P1" (disallowed code point).
func (e runeError) code() string { return "P1" }

// Error implements the error interface.
func (e runeError) Error() string {
	return fmt.Sprintf("idna: disallowed rune %U", e)
}
|
||||||
|
|
||||||
|
// process implements the algorithm described in section 4 of UTS #46,
// see https://www.unicode.org/reports/tr46.
//
// It maps the input, then validates (and for toASCII, punycode-encodes) each
// label. Only the FIRST error encountered is retained; processing continues so
// that a (partially) converted string is always returned alongside the error.
func (p *Profile) process(s string, toASCII bool) (string, error) {
	var err error
	if p.mapping != nil {
		s, err = p.mapping(p, s)
	}
	// Remove leading empty labels.
	if p.removeLeadingDots {
		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
		}
	}
	// It seems like we should only create this error on ToASCII, but the
	// UTS 46 conformance tests suggests we should always check this.
	if err == nil && p.verifyDNSLength && s == "" {
		err = &labelError{s, "A4"}
	}
	labels := labelIter{orig: s}
	for ; !labels.done(); labels.next() {
		label := labels.label()
		if label == "" {
			// Empty labels are not okay. The label iterator skips the last
			// label if it is empty.
			if err == nil && p.verifyDNSLength {
				err = &labelError{s, "A4"}
			}
			continue
		}
		if strings.HasPrefix(label, acePrefix) {
			// An A-label ("xn--..."): decode and validate the U-label form.
			u, err2 := decode(label[len(acePrefix):])
			if err2 != nil {
				if err == nil {
					err = err2
				}
				// Spec says keep the old label.
				continue
			}
			labels.set(u)
			if err == nil && p.validateLabels {
				err = p.fromPuny(p, u)
			}
			if err == nil {
				// This should be called on NonTransitional, according to the
				// spec, but that currently does not have any effect. Use the
				// original profile to preserve options.
				err = p.validateLabel(u)
			}
		} else if err == nil {
			err = p.validateLabel(label)
		}
	}
	if toASCII {
		// Second pass: punycode-encode any non-ASCII labels and check
		// per-label length limits.
		for labels.reset(); !labels.done(); labels.next() {
			label := labels.label()
			if !ascii(label) {
				a, err2 := encode(acePrefix, label)
				if err == nil {
					err = err2
				}
				label = a
				labels.set(a)
			}
			n := len(label)
			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
				err = &labelError{label, "A4"}
			}
		}
	}
	s = labels.result()
	if toASCII && p.verifyDNSLength && err == nil {
		// Compute the length of the domain name minus the root label and its dot.
		n := len(s)
		if n > 0 && s[n-1] == '.' {
			n--
		}
		if len(s) < 1 || n > 253 {
			err = &labelError{s, "A4"}
		}
	}
	return s, err
}
|
||||||
|
|
||||||
|
// normalize is the minimal mapping step: it NFC-normalizes s and performs no
// validation. It is installed by ValidateLabels when no mapping is set.
func normalize(p *Profile, s string) (string, error) {
	return norm.NFC.String(s), nil
}
|
||||||
|
|
||||||
|
// validateRegistration is the mapping step for registration profiles: it does
// not map at all, but rejects any input that is not NFC-normalized or that
// contains a rune outside the valid/deviation categories.
func validateRegistration(p *Profile, s string) (string, error) {
	if !norm.NFC.IsNormalString(s) {
		return s, &labelError{s, "V1"}
	}
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		// Reject on the first rune whose simplified category is not
		// acceptable for registration.
		switch p.simplify(info(v).category()) {
		// TODO: handle the NV8 defined in the Unicode idna data set to allow
		// for strict conformance to IDNA2008.
		case valid, deviation:
		case disallowed, mapped, unknown, ignored:
			r, _ := utf8.DecodeRuneInString(s[i:])
			return s, runeError(r)
		}
		i += sz
	}
	return s, nil
}
|
||||||
|
|
||||||
|
// validateAndMap is the mapping step for lookup profiles. It maps, drops or
// replaces runes per their (simplified) category, records the first
// disallowed rune as the error, and NFC-normalizes the result. The byte
// buffer b is only materialized once a rune actually needs changing; k tracks
// how much of s has been copied into b so far.
func validateAndMap(p *Profile, s string) (string, error) {
	var (
		err error
		b   []byte
		k   int
	)
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		start := i
		i += sz
		// Copy bytes not copied so far.
		switch p.simplify(info(v).category()) {
		case valid:
			continue
		case disallowed:
			// Keep the rune in the output but remember the first offender.
			if err == nil {
				r, _ := utf8.DecodeRuneInString(s[start:])
				err = runeError(r)
			}
			continue
		case mapped, deviation:
			b = append(b, s[k:start]...)
			b = info(v).appendMapping(b, s[start:i])
		case ignored:
			b = append(b, s[k:start]...)
			// drop the rune
		case unknown:
			b = append(b, s[k:start]...)
			b = append(b, "\ufffd"...)
		}
		k = i
	}
	if k == 0 {
		// No changes so far.
		s = norm.NFC.String(s)
	} else {
		b = append(b, s[k:]...)
		if norm.NFC.QuickSpan(b) != len(b) {
			b = norm.NFC.Bytes(b)
		}
		// TODO: the punycode converters require strings as input.
		s = string(b)
	}
	return s, err
}
|
||||||
|
|
||||||
|
// A labelIter allows iterating over domain name labels.
//
// It starts in a zero-allocation mode that scans orig directly; the first call
// to set switches it to a slice-backed mode so labels can be replaced.
type labelIter struct {
	orig     string   // the original domain string
	slice    []string // lazily-populated split of orig; nil until set is called
	curStart int      // start offset of the current label in orig
	curEnd   int      // end offset of the current label in orig
	i        int      // index of the current label when in slice mode
}
|
||||||
|
|
||||||
|
func (l *labelIter) reset() {
|
||||||
|
l.curStart = 0
|
||||||
|
l.curEnd = 0
|
||||||
|
l.i = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) done() bool {
|
||||||
|
return l.curStart >= len(l.orig)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *labelIter) result() string {
|
||||||
|
if l.slice != nil {
|
||||||
|
return strings.Join(l.slice, ".")
|
||||||
|
}
|
||||||
|
return l.orig
|
||||||
|
}
|
||||||
|
|
||||||
|
// label returns the current label. In string mode it also advances curEnd to
// the next dot (or end of string), which next relies on.
func (l *labelIter) label() string {
	if l.slice != nil {
		return l.slice[l.i]
	}
	p := strings.IndexByte(l.orig[l.curStart:], '.')
	// Note: curEnd is set before the p == -1 check; the bogus value
	// curStart-1 is overwritten below when no dot remains.
	l.curEnd = l.curStart + p
	if p == -1 {
		l.curEnd = len(l.orig)
	}
	return l.orig[l.curStart:l.curEnd]
}
|
||||||
|
|
||||||
|
// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
	l.i++
	if l.slice != nil {
		// Slice mode: mark the iterator done when past the end, or when the
		// only remaining label is the trailing empty one (root label).
		if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
			l.curStart = len(l.orig)
		}
	} else {
		// String mode: curEnd was advanced by the preceding label() call.
		l.curStart = l.curEnd + 1
		if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
			// A trailing dot leaves one final empty label; skip it.
			l.curStart = len(l.orig)
		}
	}
}
|
||||||
|
|
||||||
|
// set replaces the current label with s, switching the iterator to
// slice-backed mode on first use.
func (l *labelIter) set(s string) {
	if l.slice == nil {
		l.slice = strings.Split(l.orig, ".")
	}
	l.slice[l.i] = s
}
|
||||||
|
|
||||||
|
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||||
|
const acePrefix = "xn--"
|
||||||
|
|
||||||
|
// simplify folds the full set of trie categories down to the core ones
// (valid, mapped, disallowed, deviation, ignored, unknown) according to the
// profile's useSTD3Rules and transitional settings.
func (p *Profile) simplify(cat category) category {
	switch cat {
	case disallowedSTD3Mapped:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = mapped
		}
	case disallowedSTD3Valid:
		if p.useSTD3Rules {
			cat = disallowed
		} else {
			cat = valid
		}
	case deviation:
		// Deviation characters (e.g. ß) are valid only under
		// non-transitional processing.
		if !p.transitional {
			cat = valid
		}
	case validNV8, validXV8:
		// TODO: handle V2008
		cat = valid
	}
	return cat
}
|
||||||
|
|
||||||
|
// validateFromPunycode checks that a U-label obtained by decoding an A-label
// is NFC-normalized and contains only valid (or deviation) runes, per the
// profile's simplification rules.
func validateFromPunycode(p *Profile, s string) error {
	if !norm.NFC.IsNormalString(s) {
		return &labelError{s, "V1"}
	}
	for i := 0; i < len(s); {
		v, sz := trie.lookupString(s[i:])
		if c := p.simplify(info(v).category()); c != valid && c != deviation {
			return &labelError{s, "V6"}
		}
		i += sz
	}
	return nil
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
zwnj = "\u200c"
|
||||||
|
zwj = "\u200d"
|
||||||
|
)
|
||||||
|
|
||||||
|
type joinState int8
|
||||||
|
|
||||||
|
const (
|
||||||
|
stateStart joinState = iota
|
||||||
|
stateVirama
|
||||||
|
stateBefore
|
||||||
|
stateBeforeVirama
|
||||||
|
stateAfter
|
||||||
|
stateFAIL
|
||||||
|
)
|
||||||
|
|
||||||
|
var joinStates = [][numJoinTypes]joinState{
|
||||||
|
stateStart: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateVirama,
|
||||||
|
},
|
||||||
|
stateVirama: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
},
|
||||||
|
stateBefore: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateBefore,
|
||||||
|
joinZWNJ: stateAfter,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateBeforeVirama,
|
||||||
|
},
|
||||||
|
stateBeforeVirama: {
|
||||||
|
joiningL: stateBefore,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateBefore,
|
||||||
|
},
|
||||||
|
stateAfter: {
|
||||||
|
joiningL: stateFAIL,
|
||||||
|
joiningD: stateBefore,
|
||||||
|
joiningT: stateAfter,
|
||||||
|
joiningR: stateStart,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||||
|
},
|
||||||
|
stateFAIL: {
|
||||||
|
0: stateFAIL,
|
||||||
|
joiningL: stateFAIL,
|
||||||
|
joiningD: stateFAIL,
|
||||||
|
joiningT: stateFAIL,
|
||||||
|
joiningR: stateFAIL,
|
||||||
|
joinZWNJ: stateFAIL,
|
||||||
|
joinZWJ: stateFAIL,
|
||||||
|
joinVirama: stateFAIL,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
//
// It checks, in order: empty labels (only an error under verifyDNSLength),
// the Bidi Rule if configured, the "--" in positions 3-4 rule (V2), leading/
// trailing hyphens (V3), a leading modifier (V5), and finally the contextual
// joiner rules of RFC 5892 via a small state machine (C).
func (p *Profile) validateLabel(s string) error {
	if s == "" {
		if p.verifyDNSLength {
			return &labelError{s, "A4"}
		}
		return nil
	}
	if p.bidirule != nil && !p.bidirule(s) {
		return &labelError{s, "B"}
	}
	if !p.validateLabels {
		return nil
	}
	trie := p.trie // p.validateLabels is only set if trie is set.
	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
		return &labelError{s, "V2"}
	}
	if s[0] == '-' || s[len(s)-1] == '-' {
		return &labelError{s, "V3"}
	}
	// TODO: merge the use of this in the trie.
	v, sz := trie.lookupString(s)
	x := info(v)
	if x.isModifier() {
		return &labelError{s, "V5"}
	}
	// Quickly return in the absence of zero-width (non) joiners.
	if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
		return nil
	}
	// Run the joiner context state machine over every rune in the label.
	st := stateStart
	for i := 0; ; {
		jt := x.joinType()
		if s[i:i+sz] == zwj {
			jt = joinZWJ
		} else if s[i:i+sz] == zwnj {
			jt = joinZWNJ
		}
		st = joinStates[st][jt]
		if x.isViramaModifier() {
			st = joinStates[st][joinVirama]
		}
		if i += sz; i == len(s) {
			break
		}
		v, sz = trie.lookupString(s[i:])
		x = info(v)
	}
	// Ending in stateAfter means a ZWNJ was never closed by a right-joining
	// character; stateFAIL means an outright rule violation.
	if st == stateFAIL || st == stateAfter {
		return &labelError{s, "C"}
	}
	return nil
}
|
||||||
|
|
||||||
|
// ascii reports whether s consists entirely of ASCII bytes.
func ascii(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|
|
@ -0,0 +1,203 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package idna
|
||||||
|
|
||||||
|
// This file implements the Punycode algorithm from RFC 3492.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These parameter values are specified in section 5.
|
||||||
|
//
|
||||||
|
// All computation is done with int32s, so that overflow behavior is identical
|
||||||
|
// regardless of whether int is 32-bit or 64-bit.
|
||||||
|
const (
|
||||||
|
base int32 = 36
|
||||||
|
damp int32 = 700
|
||||||
|
initialBias int32 = 72
|
||||||
|
initialN int32 = 128
|
||||||
|
skew int32 = 38
|
||||||
|
tmax int32 = 26
|
||||||
|
tmin int32 = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// punyError constructs the Punycode processing error (UTS #46 status "A3") for label s.
func punyError(s string) error { return &labelError{s, "A3"} }
|
||||||
|
|
||||||
|
// decode decodes a string as specified in section 6.2 of RFC 3492.
//
// The input is the A-label payload (without the "xn--" prefix). All arithmetic
// is int32 with explicit overflow checks; decoding is capped at 1024 output
// runes as a defensive limit.
func decode(encoded string) (string, error) {
	if encoded == "" {
		return "", nil
	}
	// pos is the index just past the last '-', separating the literal
	// ASCII part from the encoded deltas.
	pos := 1 + strings.LastIndex(encoded, "-")
	if pos == 1 {
		return "", punyError(encoded)
	}
	if pos == len(encoded) {
		return encoded[:len(encoded)-1], nil
	}
	output := make([]rune, 0, len(encoded))
	if pos != 0 {
		for _, r := range encoded[:pos-1] {
			output = append(output, r)
		}
	}
	i, n, bias := int32(0), initialN, initialBias
	for pos < len(encoded) {
		oldI, w := i, int32(1)
		// Decode one variable-length generalized integer.
		for k := base; ; k += base {
			if pos == len(encoded) {
				return "", punyError(encoded)
			}
			digit, ok := decodeDigit(encoded[pos])
			if !ok {
				return "", punyError(encoded)
			}
			pos++
			i += digit * w
			if i < 0 {
				// int32 overflow.
				return "", punyError(encoded)
			}
			t := k - bias
			if t < tmin {
				t = tmin
			} else if t > tmax {
				t = tmax
			}
			if digit < t {
				break
			}
			w *= base - t
			if w >= math.MaxInt32/base {
				return "", punyError(encoded)
			}
		}
		x := int32(len(output) + 1)
		bias = adapt(i-oldI, x, oldI == 0)
		n += i / x
		i %= x
		if n > utf8.MaxRune || len(output) >= 1024 {
			return "", punyError(encoded)
		}
		// Insert rune n at position i.
		output = append(output, 0)
		copy(output[i+1:], output[i:])
		output[i] = n
		i++
	}
	return string(output), nil
}
|
||||||
|
|
||||||
|
// encode encodes a string as specified in section 6.3 of RFC 3492 and prepends
// prefix to the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func (func_placeholder) // (no change below this comment)
|
||||||
|
|
||||||
|
// decodeDigit maps a punycode digit character to its value in [0, 36):
// 'a'-'z' (or 'A'-'Z') map to 0-25 and '0'-'9' map to 26-35. ok is false for
// any other byte.
func decodeDigit(x byte) (digit int32, ok bool) {
	switch {
	case 'a' <= x && x <= 'z':
		return int32(x) - 'a', true
	case 'A' <= x && x <= 'Z':
		return int32(x) - 'A', true
	case '0' <= x && x <= '9':
		return int32(x) + 26 - '0', true
	default:
		return 0, false
	}
}
|
||||||
|
|
||||||
|
// encodeDigit maps a value in [0, 36) to its punycode digit character:
// 0-25 become 'a'-'z' and 26-35 become '0'-'9'. Any other value is a
// programmer error and panics.
func encodeDigit(digit int32) byte {
	if 0 <= digit && digit < 26 {
		return 'a' + byte(digit)
	}
	if 26 <= digit && digit < 36 {
		return '0' + byte(digit-26)
	}
	panic("idna: internal error in punycode encoding")
}
|
||||||
|
|
||||||
|
// adapt is the bias adaptation function specified in section 6.1.
|
||||||
|
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||||
|
if firstTime {
|
||||||
|
delta /= damp
|
||||||
|
} else {
|
||||||
|
delta /= 2
|
||||||
|
}
|
||||||
|
delta += delta / numPoints
|
||||||
|
k := int32(0)
|
||||||
|
for delta > ((base-tmin)*tmax)/2 {
|
||||||
|
delta /= base - tmin
|
||||||
|
k += base
|
||||||
|
}
|
||||||
|
return k + (base-tmin+1)*delta/(delta+skew)
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,72 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package idna
|
||||||
|
|
||||||
|
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
//
// The mapping is either stored directly in the mappings table (length-prefixed)
// or derived by XOR-ing a pattern — inline or from the xorData table — onto the
// UTF-8 bytes of the original rune s.
func (c info) appendMapping(b []byte, s string) []byte {
	index := int(c >> indexShift)
	if c&xorBit == 0 {
		// Direct mapping: mappings[index] is the length, followed by the bytes.
		s := mappings[index:]
		return append(b, s[1:s[0]+1]...)
	}
	b = append(b, s...)
	if c&inlineXOR == inlineXOR {
		// TODO: support and handle two-byte inline masks
		b[len(b)-1] ^= byte(index)
	} else {
		// xorData[index] is the pattern length; apply it to the trailing bytes.
		for p := len(b) - int(xorData[index]); p < len(b); p++ {
			index++
			b[p] ^= xorData[index]
		}
	}
	return b
}
|
||||||
|
|
||||||
|
// Sparse block handling code.
|
||||||
|
|
||||||
|
type valueRange struct {
|
||||||
|
value uint16 // header: value:stride
|
||||||
|
lo, hi byte // header: lo:n
|
||||||
|
}
|
||||||
|
|
||||||
|
type sparseBlocks struct {
|
||||||
|
values []valueRange
|
||||||
|
offset []uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
var idnaSparse = sparseBlocks{
|
||||||
|
values: idnaSparseValues[:],
|
||||||
|
offset: idnaSparseOffset[:],
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
|
||||||
|
var trie = &idnaTrie{}
|
||||||
|
|
||||||
|
// lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
	offset := t.offset[n]
	// The first entry is a header: value holds the stride, lo holds the
	// number of ranges that follow.
	header := t.values[offset]
	lo := offset + 1
	hi := lo + uint16(header.lo)
	// Binary search the ranges for one containing b.
	for lo < hi {
		m := lo + (hi-lo)/2
		r := t.values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	// b falls in no range: the zero (default) value.
	return 0
}
|
|
@ -0,0 +1,119 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package idna
|
||||||
|
|
||||||
|
// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.

// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
//   if mapped {
//     if inlinedXOR {
//       15..13 inline XOR marker
//       12..11 unused
//       10..3  inline XOR mask
//     } else {
//       15..3  index into xor or mapping table
//     }
//   } else {
//       15..14 unused
//       13     mayNeedNorm
//       12..11 attributes
//       10..8  joining type
//       7..3   category type
//   }
//   2  use xor pattern
//   1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	indexShift   = 3
	xorBit       = 0x4    // interpret the index as an xor pattern
	inlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined.

	joinShift = 8
	joinMask  = 0x07

	// Attributes
	attributesMask = 0x1800
	viramaModifier = 0x1800
	modifier       = 0x1000
	rtl            = 0x0800

	mayNeedNorm = 0x2000
)

// A category corresponds to a category defined in the IDNA mapping table.
type category uint16

const (
	unknown              category = 0 // not currently defined in unicode.
	mapped               category = 1
	disallowedSTD3Mapped category = 2
	deviation            category = 3
)

const (
	valid               category = 0x08
	validNV8            category = 0x18
	validXV8            category = 0x28
	disallowed          category = 0x40
	disallowedSTD3Valid category = 0x80
	ignored             category = 0xC0
)

// join types and additional rune information
const (
	joiningL = (iota + 1)
	joiningD
	joiningT
	joiningR

	// The following types are derived during processing.
	joinZWJ
	joinZWNJ
	joinVirama
	numJoinTypes
)

// isMapped reports whether the rune has a mapping: one of the small
// categories mapped, disallowedSTD3Mapped or deviation (values 1..3).
func (c info) isMapped() bool {
	return c&0x3 != 0
}

// category extracts the rune's category: the two low "small" bits when
// set, otherwise the wider category stored in bits 7..3.
func (c info) category() category {
	small := c & catSmallMask
	if small != 0 {
		return category(small)
	}
	return category(c & catBigMask)
}

// joinType returns the joining type stored in bits 10..8. For mapped
// runes those bits hold other data, so zero is returned instead.
func (c info) joinType() info {
	if c.isMapped() {
		return 0
	}
	return (c >> joinShift) & joinMask
}

// isModifier reports whether the rune is an unmapped rune with the
// modifier attribute bit set.
func (c info) isModifier() bool {
	return c&(modifier|catSmallMask) == modifier
}

// isViramaModifier reports whether the rune is an unmapped rune whose
// attribute bits equal viramaModifier.
func (c info) isViramaModifier() bool {
	return c&(attributesMask|catSmallMask) == viramaModifier
}
|
|
@ -0,0 +1,198 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
// Package registry provides access to the Windows registry.
|
||||||
|
//
|
||||||
|
// Here is a simple example, opening a registry key and reading a string value from it.
|
||||||
|
//
|
||||||
|
// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||||
|
// if err != nil {
|
||||||
|
// log.Fatal(err)
|
||||||
|
// }
|
||||||
|
// defer k.Close()
|
||||||
|
//
|
||||||
|
// s, _, err := k.GetStringValue("SystemRoot")
|
||||||
|
// if err != nil {
|
||||||
|
// log.Fatal(err)
|
||||||
|
// }
|
||||||
|
// fmt.Printf("Windows system root is %q\n", s)
|
||||||
|
//
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Registry key security and access rights.
	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
	// for details.
	ALL_ACCESS         = 0xf003f
	CREATE_LINK        = 0x00020
	CREATE_SUB_KEY     = 0x00004
	ENUMERATE_SUB_KEYS = 0x00008
	EXECUTE            = 0x20019
	NOTIFY             = 0x00010
	QUERY_VALUE        = 0x00001
	READ               = 0x20019
	SET_VALUE          = 0x00002
	WOW64_32KEY        = 0x00200
	WOW64_64KEY        = 0x00100
	WRITE              = 0x20006
)

// Key is a handle to an open Windows registry key.
// Keys can be obtained by calling OpenKey; there are
// also some predefined root keys such as CURRENT_USER.
// Keys can be used directly in the Windows API.
type Key syscall.Handle

const (
	// Windows defines some predefined root keys that are always open.
	// An application can use these keys as entry points to the registry.
	// Normally these keys are used in OpenKey to open new keys,
	// but they can also be used anywhere a Key is required.
	CLASSES_ROOT     = Key(syscall.HKEY_CLASSES_ROOT)
	CURRENT_USER     = Key(syscall.HKEY_CURRENT_USER)
	LOCAL_MACHINE    = Key(syscall.HKEY_LOCAL_MACHINE)
	USERS            = Key(syscall.HKEY_USERS)
	CURRENT_CONFIG   = Key(syscall.HKEY_CURRENT_CONFIG)
	PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
)
|
||||||
|
|
||||||
|
// Close closes open key k. It is a no-fail wrapper around RegCloseKey;
// closing one of the predefined root keys is harmless.
func (k Key) Close() error {
	return syscall.RegCloseKey(syscall.Handle(k))
}
|
||||||
|
|
||||||
|
// OpenKey opens a new key with path name relative to key k.
|
||||||
|
// It accepts any open key, including CURRENT_USER and others,
|
||||||
|
// and returns the new key and an error.
|
||||||
|
// The access parameter specifies desired access rights to the
|
||||||
|
// key to be opened.
|
||||||
|
func OpenKey(k Key, path string, access uint32) (Key, error) {
|
||||||
|
p, err := syscall.UTF16PtrFromString(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
var subkey syscall.Handle
|
||||||
|
err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return Key(subkey), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenRemoteKey opens a predefined registry key on another
|
||||||
|
// computer pcname. The key to be opened is specified by k, but
|
||||||
|
// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
|
||||||
|
// If pcname is "", OpenRemoteKey returns local computer key.
|
||||||
|
func OpenRemoteKey(pcname string, k Key) (Key, error) {
|
||||||
|
var err error
|
||||||
|
var p *uint16
|
||||||
|
if pcname != "" {
|
||||||
|
p, err = syscall.UTF16PtrFromString(`\\` + pcname)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var remoteKey syscall.Handle
|
||||||
|
err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return Key(remoteKey), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadSubKeyNames returns the names of subkeys of key k.
// The parameter n controls the number of returned names,
// analogous to the way os.File.Readdirnames works: n <= 0 means
// read all names; with n > 0, io.EOF is returned if fewer than n
// names exist.
func (k Key) ReadSubKeyNames(n int) ([]string, error) {
	names := make([]string, 0)
	// Registry key size limit is 255 bytes and described there:
	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
	buf := make([]uint16, 256) // plus extra room for terminating zero byte
loopItems:
	for i := uint32(0); ; i++ {
		if n > 0 {
			if len(names) == n {
				return names, nil
			}
		}
		// l is in/out: it enters as the buffer capacity and exits as
		// the length of the name actually written.
		l := uint32(len(buf))
		for {
			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
			if err == nil {
				break
			}
			if err == syscall.ERROR_MORE_DATA {
				// Double buffer size and try again.
				l = uint32(2 * len(buf))
				buf = make([]uint16, l)
				continue
			}
			if err == _ERROR_NO_MORE_ITEMS {
				// Enumeration exhausted; leave the outer loop.
				break loopItems
			}
			return names, err
		}
		names = append(names, syscall.UTF16ToString(buf[:l]))
	}
	if n > len(names) {
		return names, io.EOF
	}
	return names, nil
}
|
||||||
|
|
||||||
|
// CreateKey creates a key named path under open key k.
|
||||||
|
// CreateKey returns the new key and a boolean flag that reports
|
||||||
|
// whether the key already existed.
|
||||||
|
// The access parameter specifies the access rights for the key
|
||||||
|
// to be created.
|
||||||
|
func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
|
||||||
|
var h syscall.Handle
|
||||||
|
var d uint32
|
||||||
|
err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
|
||||||
|
0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false, err
|
||||||
|
}
|
||||||
|
return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteKey deletes the subkey path of key k and its values.
// The subkey must not itself have subkeys.
func DeleteKey(k Key, path string) error {
	return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
}

// A KeyInfo describes the statistics of a key. It is returned by Stat.
type KeyInfo struct {
	SubKeyCount     uint32
	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
	ValueCount      uint32
	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
	MaxValueLen     uint32 // longest data component among the key's values, in bytes
	lastWriteTime   syscall.Filetime
}

// ModTime returns the key's last write time.
func (ki *KeyInfo) ModTime() time.Time {
	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
}
|
||||||
|
|
||||||
|
// Stat retrieves information about the open key k.
func (k Key) Stat() (*KeyInfo, error) {
	var ki KeyInfo
	// The nil arguments skip class name, security descriptor and the
	// max class-length outputs, which KeyInfo does not expose.
	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
	if err != nil {
		return nil, err
	}
	return &ki, nil
}
|
|
@ -0,0 +1,9 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build generate
|
||||||
|
|
||||||
|
package registry
|
||||||
|
|
||||||
|
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go
|
|
@ -0,0 +1,32 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const (
	// Option for regCreateKeyEx: key data is preserved across reboots.
	_REG_OPTION_NON_VOLATILE = 0

	// Disposition values reported by regCreateKeyEx.
	_REG_CREATED_NEW_KEY     = 1
	_REG_OPENED_EXISTING_KEY = 2

	// Returned by enumeration APIs when no items remain.
	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
)

// LoadRegLoadMUIString checks that RegLoadMUIStringW is available on
// this system (it is missing on old Windows versions). Call it before
// using Key.GetMUIStringValue, which panics if the proc is absent.
func LoadRegLoadMUIString() error {
	return procRegLoadMUIStringW.Find()
}
|
||||||
|
|
||||||
|
//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
|
||||||
|
//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
|
||||||
|
//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
|
||||||
|
//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
|
||||||
|
//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
|
||||||
|
//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
|
||||||
|
//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
|
||||||
|
|
||||||
|
//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
|
|
@ -0,0 +1,387 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"syscall"
|
||||||
|
"unicode/utf16"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Registry value types.
	NONE                       = 0
	SZ                         = 1
	EXPAND_SZ                  = 2
	BINARY                     = 3
	DWORD                      = 4
	DWORD_BIG_ENDIAN           = 5
	LINK                       = 6
	MULTI_SZ                   = 7
	RESOURCE_LIST              = 8
	FULL_RESOURCE_DESCRIPTOR   = 9
	RESOURCE_REQUIREMENTS_LIST = 10
	QWORD                      = 11
)

var (
	// ErrShortBuffer is returned when the buffer was too short for the operation.
	ErrShortBuffer = syscall.ERROR_MORE_DATA

	// ErrNotExist is returned when a registry key or value does not exist.
	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND

	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
	ErrUnexpectedType = errors.New("unexpected key value type")
)
|
||||||
|
|
||||||
|
// GetValue retrieves the type and data for the specified value associated
// with an open key k. It fills up buffer buf and returns the retrieved
// byte count n. If buf is too small to fit the stored value it returns
// ErrShortBuffer error along with the required buffer size n.
// If no buffer is provided, GetValue returns the value's type and its
// required size n only, without retrieving any data.
// If the value does not exist, the error returned is ErrNotExist.
//
// GetValue is a low level function. If value's type is known, use the appropriate
// Get*Value function instead.
func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
	pname, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return 0, 0, err
	}
	// Pass a nil data pointer when buf is empty; the API then reports
	// only the value's type and required size.
	var pbuf *byte
	if len(buf) > 0 {
		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
	}
	l := uint32(len(buf))
	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
	if err != nil {
		// On ErrShortBuffer, l holds the required size for the caller.
		return int(l), valtype, err
	}
	return int(l), valtype, nil
}
|
||||||
|
|
||||||
|
func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
|
||||||
|
p, err := syscall.UTF16PtrFromString(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
var t uint32
|
||||||
|
n := uint32(len(buf))
|
||||||
|
for {
|
||||||
|
err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
|
||||||
|
if err == nil {
|
||||||
|
return buf[:n], t, nil
|
||||||
|
}
|
||||||
|
if err != syscall.ERROR_MORE_DATA {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
if n <= uint32(len(buf)) {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
buf = make([]byte, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStringValue retrieves the string value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetStringValue returns ErrNotExist.
// If value is not SZ or EXPAND_SZ, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
	data, typ, err2 := k.getValue(name, make([]byte, 64))
	if err2 != nil {
		return "", typ, err2
	}
	switch typ {
	case SZ, EXPAND_SZ:
	default:
		return "", typ, ErrUnexpectedType
	}
	if len(data) == 0 {
		return "", typ, nil
	}
	// Reinterpret the raw bytes as UTF-16 code units; the huge-array
	// cast is only a device to form a []uint16 view, nothing is copied.
	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:]
	return syscall.UTF16ToString(u), typ, nil
}
|
||||||
|
|
||||||
|
// GetMUIStringValue retrieves the localized string value for
// the specified value name associated with an open key k.
// If the value name doesn't exist or the localized string value
// can't be resolved, GetMUIStringValue returns ErrNotExist.
// GetMUIStringValue panics if the system doesn't support
// regLoadMUIString; use LoadRegLoadMUIString to check if
// regLoadMUIString is supported before calling this function.
func (k Key) GetMUIStringValue(name string) (string, error) {
	pname, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return "", err
	}

	buf := make([]uint16, 1024)
	var buflen uint32
	var pdir *uint16

	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path

		// Try to resolve the string value using the system directory as
		// a DLL search path; this assumes the string value is of the form
		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.

		// This approach works with tzres.dll but may have to be revised
		// in the future to allow callers to provide custom search paths.

		var s string
		s, err = ExpandString("%SystemRoot%\\system32\\")
		if err != nil {
			return "", err
		}
		pdir, err = syscall.UTF16PtrFromString(s)
		if err != nil {
			return "", err
		}

		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	}

	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
		if buflen <= uint32(len(buf)) {
			break // Buffer not growing, assume race; break
		}
		buf = make([]uint16, buflen)
		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
	}

	if err != nil {
		return "", err
	}

	return syscall.UTF16ToString(buf), nil
}
|
||||||
|
|
||||||
|
// ExpandString expands environment-variable strings and replaces
// them with the values defined for the current user.
// Use ExpandString to expand EXPAND_SZ strings.
func ExpandString(value string) (string, error) {
	if value == "" {
		return "", nil
	}
	p, err := syscall.UTF16PtrFromString(value)
	if err != nil {
		return "", err
	}
	r := make([]uint16, 100)
	for {
		// n is the required size (in uint16s, including the NUL); when
		// it exceeds our buffer, grow to exactly n and retry.
		n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
		if err != nil {
			return "", err
		}
		if n <= uint32(len(r)) {
			u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:]
			return syscall.UTF16ToString(u), nil
		}
		r = make([]uint16, n)
	}
}
|
||||||
|
|
||||||
|
// GetStringsValue retrieves the []string value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetStringsValue returns ErrNotExist.
// If value is not MULTI_SZ, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
	data, typ, err2 := k.getValue(name, make([]byte, 64))
	if err2 != nil {
		return nil, typ, err2
	}
	if typ != MULTI_SZ {
		return nil, typ, ErrUnexpectedType
	}
	if len(data) == 0 {
		return nil, typ, nil
	}
	// View the raw bytes as UTF-16 code units (two bytes each).
	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2]
	if len(p) == 0 {
		return nil, typ, nil
	}
	if p[len(p)-1] == 0 {
		p = p[:len(p)-1] // remove terminating null
	}
	// MULTI_SZ is a run of NUL-separated strings; split on each NUL.
	val = make([]string, 0, 5)
	from := 0
	for i, c := range p {
		if c == 0 {
			val = append(val, string(utf16.Decode(p[from:i])))
			from = i + 1
		}
	}
	return val, typ, nil
}
|
||||||
|
|
||||||
|
// GetIntegerValue retrieves the integer value for the specified
// value name associated with an open key k. It also returns the value's type.
// If value does not exist, GetIntegerValue returns ErrNotExist.
// If value is not DWORD or QWORD, it will return the correct value
// type and ErrUnexpectedType.
func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
	data, typ, err2 := k.getValue(name, make([]byte, 8))
	if err2 != nil {
		return 0, typ, err2
	}
	switch typ {
	case DWORD:
		if len(data) != 4 {
			return 0, typ, errors.New("DWORD value is not 4 bytes long")
		}
		// Copy the 4 raw bytes into a uint32 in native byte order.
		var val32 uint32
		copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
		return uint64(val32), DWORD, nil
	case QWORD:
		if len(data) != 8 {
			return 0, typ, errors.New("QWORD value is not 8 bytes long")
		}
		// Copy the 8 raw bytes directly into the uint64 result.
		copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
		return val, QWORD, nil
	default:
		return 0, typ, ErrUnexpectedType
	}
}
|
||||||
|
|
||||||
|
// GetBinaryValue retrieves the binary value for the specified
|
||||||
|
// value name associated with an open key k. It also returns the value's type.
|
||||||
|
// If value does not exist, GetBinaryValue returns ErrNotExist.
|
||||||
|
// If value is not BINARY, it will return the correct value
|
||||||
|
// type and ErrUnexpectedType.
|
||||||
|
func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
|
||||||
|
data, typ, err2 := k.getValue(name, make([]byte, 64))
|
||||||
|
if err2 != nil {
|
||||||
|
return nil, typ, err2
|
||||||
|
}
|
||||||
|
if typ != BINARY {
|
||||||
|
return nil, typ, ErrUnexpectedType
|
||||||
|
}
|
||||||
|
return data, typ, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k Key) setValue(name string, valtype uint32, data []byte) error {
|
||||||
|
p, err := syscall.UTF16PtrFromString(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(data) == 0 {
|
||||||
|
return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
|
||||||
|
}
|
||||||
|
return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDWordValue sets the data and type of a name value
// under key k to value and DWORD.
func (k Key) SetDWordValue(name string, value uint32) error {
	// View the uint32 in native byte order as its 4 raw bytes.
	return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
}

// SetQWordValue sets the data and type of a name value
// under key k to value and QWORD.
func (k Key) SetQWordValue(name string, value uint64) error {
	// View the uint64 in native byte order as its 8 raw bytes.
	return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
}

// setStringValue encodes value as NUL-terminated UTF-16 and stores it
// under name with the given registry value type (SZ or EXPAND_SZ).
func (k Key) setStringValue(name string, valtype uint32, value string) error {
	v, err := syscall.UTF16FromString(value)
	if err != nil {
		return err
	}
	// Reinterpret the []uint16 as raw bytes (length doubles).
	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
	return k.setValue(name, valtype, buf)
}
|
||||||
|
|
||||||
|
// SetStringValue sets the data and type of a name value
// under key k to value and SZ. The value must not contain a zero byte.
func (k Key) SetStringValue(name, value string) error {
	return k.setStringValue(name, SZ, value)
}

// SetExpandStringValue sets the data and type of a name value
// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
func (k Key) SetExpandStringValue(name, value string) error {
	return k.setStringValue(name, EXPAND_SZ, value)
}
|
||||||
|
|
||||||
|
// SetStringsValue sets the data and type of a name value
// under key k to value and MULTI_SZ. The value strings
// must not contain a zero byte.
func (k Key) SetStringsValue(name string, value []string) error {
	// MULTI_SZ layout: each string NUL-terminated, then one extra NUL.
	// NOTE(review): the += concatenation is quadratic; fine for the
	// small string lists typical of registry values.
	ss := ""
	for _, s := range value {
		for i := 0; i < len(s); i++ {
			if s[i] == 0 {
				return errors.New("string cannot have 0 inside")
			}
		}
		ss += s + "\x00"
	}
	v := utf16.Encode([]rune(ss + "\x00"))
	// Reinterpret the []uint16 as raw bytes (length doubles).
	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
	return k.setValue(name, MULTI_SZ, buf)
}
|
||||||
|
|
||||||
|
// SetBinaryValue sets the data and type of a name value
// under key k to value and BINARY.
func (k Key) SetBinaryValue(name string, value []byte) error {
	return k.setValue(name, BINARY, value)
}

// DeleteValue removes a named value from the key k.
func (k Key) DeleteValue(name string) error {
	return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
}
|
||||||
|
|
||||||
|
// ReadValueNames returns the value names of key k.
// The parameter n controls the number of returned names,
// analogous to the way os.File.Readdirnames works: n <= 0 means
// read all names; with n > 0, io.EOF is returned if fewer than n
// names exist.
func (k Key) ReadValueNames(n int) ([]string, error) {
	// Stat gives the counts needed to size the result and the buffer.
	ki, err := k.Stat()
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, ki.ValueCount)
	buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
loopItems:
	for i := uint32(0); ; i++ {
		if n > 0 {
			if len(names) == n {
				return names, nil
			}
		}
		// l is in/out: buffer capacity in, written name length out.
		l := uint32(len(buf))
		for {
			err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
			if err == nil {
				break
			}
			if err == syscall.ERROR_MORE_DATA {
				// Double buffer size and try again.
				l = uint32(2 * len(buf))
				buf = make([]uint16, l)
				continue
			}
			if err == _ERROR_NO_MORE_ITEMS {
				// Enumeration exhausted; leave the outer loop.
				break loopItems
			}
			return names, err
		}
		names = append(names, syscall.UTF16ToString(buf[:l]))
	}
	if n > len(names) {
		return names, io.EOF
	}
	return names, nil
}
|
|
@ -0,0 +1,120 @@
|
||||||
|
// Code generated by 'go generate'; DO NOT EDIT.
|
||||||
|
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ unsafe.Pointer
|
||||||
|
|
||||||
|
// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}
|
||||||
|
|
||||||
|
// Lazily loaded system DLLs and the procedures used by this package.
// NewLazySystemDLL restricts loading to the Windows system directory.
var (
	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")

	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
	procRegSetValueExW            = modadvapi32.NewProc("RegSetValueExW")
	procRegEnumValueW             = modadvapi32.NewProc("RegEnumValueW")
	procRegDeleteValueW           = modadvapi32.NewProc("RegDeleteValueW")
	procRegLoadMUIStringW         = modadvapi32.NewProc("RegLoadMUIStringW")
	procRegConnectRegistryW       = modadvapi32.NewProc("RegConnectRegistryW")
	procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
)
|
||||||
|
|
||||||
|
// The wrappers below are generated by mksyscall_windows.go. Each calls
// the corresponding Win32 registry API and converts a nonzero return
// code into a syscall.Errno; a zero return yields a nil error.

// regCreateKeyEx calls advapi32.RegCreateKeyExW.
func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regDeleteKey calls advapi32.RegDeleteKeyW.
func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
	r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regSetValueEx calls advapi32.RegSetValueExW.
func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
	r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regEnumValue calls advapi32.RegEnumValueW.
func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
	r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regDeleteValue calls advapi32.RegDeleteValueW.
func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
	r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regLoadMUIString calls advapi32.RegLoadMUIStringW.
func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
	r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}

// regConnectRegistry calls advapi32.RegConnectRegistryW.
func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
	r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
	if r0 != 0 {
		regerrno = syscall.Errno(r0)
	}
	return
}
|
||||||
|
|
||||||
|
func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
|
||||||
|
r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
|
||||||
|
n = uint32(r0)
|
||||||
|
if n == 0 {
|
||||||
|
if e1 != 0 {
|
||||||
|
err = errnoErr(e1)
|
||||||
|
} else {
|
||||||
|
err = syscall.EINVAL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,336 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package bidirule implements the Bidi Rule defined by RFC 5893.
|
||||||
|
//
|
||||||
|
// This package is under development. The API may change without notice and
|
||||||
|
// without preserving backward compatibility.
|
||||||
|
package bidirule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
"golang.org/x/text/unicode/bidi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This file contains an implementation of RFC 5893: Right-to-Left Scripts for
|
||||||
|
// Internationalized Domain Names for Applications (IDNA)
|
||||||
|
//
|
||||||
|
// A label is an individual component of a domain name. Labels are usually
|
||||||
|
// shown separated by dots; for example, the domain name "www.example.com" is
|
||||||
|
// composed of three labels: "www", "example", and "com".
|
||||||
|
//
|
||||||
|
// An RTL label is a label that contains at least one character of class R, AL,
|
||||||
|
// or AN. An LTR label is any label that is not an RTL label.
|
||||||
|
//
|
||||||
|
// A "Bidi domain name" is a domain name that contains at least one RTL label.
|
||||||
|
//
|
||||||
|
// The following guarantees can be made based on the above:
|
||||||
|
//
|
||||||
|
// o In a domain name consisting of only labels that satisfy the rule,
|
||||||
|
// the requirements of Section 3 are satisfied. Note that even LTR
|
||||||
|
// labels and pure ASCII labels have to be tested.
|
||||||
|
//
|
||||||
|
// o In a domain name consisting of only LDH labels (as defined in the
|
||||||
|
// Definitions document [RFC5890]) and labels that satisfy the rule,
|
||||||
|
// the requirements of Section 3 are satisfied as long as a label
|
||||||
|
// that starts with an ASCII digit does not come after a
|
||||||
|
// right-to-left label.
|
||||||
|
//
|
||||||
|
// No guarantee is given for other combinations.
|
||||||
|
|
||||||
|
// ErrInvalid indicates a label is invalid according to the Bidi Rule.
|
||||||
|
var ErrInvalid = errors.New("bidirule: failed Bidi Rule")
|
||||||
|
|
||||||
|
type ruleState uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
ruleInitial ruleState = iota
|
||||||
|
ruleLTR
|
||||||
|
ruleLTRFinal
|
||||||
|
ruleRTL
|
||||||
|
ruleRTLFinal
|
||||||
|
ruleInvalid
|
||||||
|
)
|
||||||
|
|
||||||
|
type ruleTransition struct {
|
||||||
|
next ruleState
|
||||||
|
mask uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
var transitions = [...][2]ruleTransition{
|
||||||
|
// [2.1] The first character must be a character with Bidi property L, R, or
|
||||||
|
// AL. If it has the R or AL property, it is an RTL label; if it has the L
|
||||||
|
// property, it is an LTR label.
|
||||||
|
ruleInitial: {
|
||||||
|
{ruleLTRFinal, 1 << bidi.L},
|
||||||
|
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL},
|
||||||
|
},
|
||||||
|
ruleRTL: {
|
||||||
|
// [2.3] In an RTL label, the end of the label must be a character with
|
||||||
|
// Bidi property R, AL, EN, or AN, followed by zero or more characters
|
||||||
|
// with Bidi property NSM.
|
||||||
|
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN},
|
||||||
|
|
||||||
|
// [2.2] In an RTL label, only characters with the Bidi properties R,
|
||||||
|
// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
|
||||||
|
// We exclude the entries from [2.3]
|
||||||
|
{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
|
||||||
|
},
|
||||||
|
ruleRTLFinal: {
|
||||||
|
// [2.3] In an RTL label, the end of the label must be a character with
|
||||||
|
// Bidi property R, AL, EN, or AN, followed by zero or more characters
|
||||||
|
// with Bidi property NSM.
|
||||||
|
{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN | 1<<bidi.NSM},
|
||||||
|
|
||||||
|
// [2.2] In an RTL label, only characters with the Bidi properties R,
|
||||||
|
// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
|
||||||
|
// We exclude the entries from [2.3] and NSM.
|
||||||
|
{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
|
||||||
|
},
|
||||||
|
ruleLTR: {
|
||||||
|
// [2.6] In an LTR label, the end of the label must be a character with
|
||||||
|
// Bidi property L or EN, followed by zero or more characters with Bidi
|
||||||
|
// property NSM.
|
||||||
|
{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN},
|
||||||
|
|
||||||
|
// [2.5] In an LTR label, only characters with the Bidi properties L,
|
||||||
|
// EN, ES, CS, ET, ON, BN, or NSM are allowed.
|
||||||
|
// We exclude the entries from [2.6].
|
||||||
|
{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
|
||||||
|
},
|
||||||
|
ruleLTRFinal: {
|
||||||
|
// [2.6] In an LTR label, the end of the label must be a character with
|
||||||
|
// Bidi property L or EN, followed by zero or more characters with Bidi
|
||||||
|
// property NSM.
|
||||||
|
{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN | 1<<bidi.NSM},
|
||||||
|
|
||||||
|
// [2.5] In an LTR label, only characters with the Bidi properties L,
|
||||||
|
// EN, ES, CS, ET, ON, BN, or NSM are allowed.
|
||||||
|
// We exclude the entries from [2.6].
|
||||||
|
{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
|
||||||
|
},
|
||||||
|
ruleInvalid: {
|
||||||
|
{ruleInvalid, 0},
|
||||||
|
{ruleInvalid, 0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// [2.4] In an RTL label, if an EN is present, no AN may be present, and
|
||||||
|
// vice versa.
|
||||||
|
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
|
||||||
|
|
||||||
|
// From RFC 5893
|
||||||
|
// An RTL label is a label that contains at least one character of type
|
||||||
|
// R, AL, or AN.
|
||||||
|
//
|
||||||
|
// An LTR label is any label that is not an RTL label.
|
||||||
|
|
||||||
|
// Direction reports the direction of the given label as defined by RFC 5893.
|
||||||
|
// The Bidi Rule does not have to be applied to labels of the category
|
||||||
|
// LeftToRight.
|
||||||
|
func Direction(b []byte) bidi.Direction {
|
||||||
|
for i := 0; i < len(b); {
|
||||||
|
e, sz := bidi.Lookup(b[i:])
|
||||||
|
if sz == 0 {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
c := e.Class()
|
||||||
|
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
||||||
|
return bidi.RightToLeft
|
||||||
|
}
|
||||||
|
i += sz
|
||||||
|
}
|
||||||
|
return bidi.LeftToRight
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirectionString reports the direction of the given label as defined by RFC
|
||||||
|
// 5893. The Bidi Rule does not have to be applied to labels of the category
|
||||||
|
// LeftToRight.
|
||||||
|
func DirectionString(s string) bidi.Direction {
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
e, sz := bidi.LookupString(s[i:])
|
||||||
|
if sz == 0 {
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c := e.Class()
|
||||||
|
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
||||||
|
return bidi.RightToLeft
|
||||||
|
}
|
||||||
|
i += sz
|
||||||
|
}
|
||||||
|
return bidi.LeftToRight
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid reports whether b conforms to the BiDi rule.
|
||||||
|
func Valid(b []byte) bool {
|
||||||
|
var t Transformer
|
||||||
|
if n, ok := t.advance(b); !ok || n < len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.isFinal()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidString reports whether s conforms to the BiDi rule.
|
||||||
|
func ValidString(s string) bool {
|
||||||
|
var t Transformer
|
||||||
|
if n, ok := t.advanceString(s); !ok || n < len(s) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.isFinal()
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a Transformer that verifies that input adheres to the Bidi Rule.
|
||||||
|
func New() *Transformer {
|
||||||
|
return &Transformer{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transformer implements transform.Transform.
|
||||||
|
type Transformer struct {
|
||||||
|
state ruleState
|
||||||
|
hasRTL bool
|
||||||
|
seen uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// A rule can only be violated for "Bidi Domain names", meaning if one of the
|
||||||
|
// following categories has been observed.
|
||||||
|
func (t *Transformer) isRTL() bool {
|
||||||
|
const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN
|
||||||
|
return t.seen&isRTL != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset implements transform.Transformer.
|
||||||
|
func (t *Transformer) Reset() { *t = Transformer{} }
|
||||||
|
|
||||||
|
// Transform implements transform.Transformer. This Transformer has state and
|
||||||
|
// needs to be reset between uses.
|
||||||
|
func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
if len(dst) < len(src) {
|
||||||
|
src = src[:len(dst)]
|
||||||
|
atEOF = false
|
||||||
|
err = transform.ErrShortDst
|
||||||
|
}
|
||||||
|
n, err1 := t.Span(src, atEOF)
|
||||||
|
copy(dst, src[:n])
|
||||||
|
if err == nil || err1 != nil && err1 != transform.ErrShortSrc {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
return n, n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Span returns the first n bytes of src that conform to the Bidi rule.
|
||||||
|
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
if t.state == ruleInvalid && t.isRTL() {
|
||||||
|
return 0, ErrInvalid
|
||||||
|
}
|
||||||
|
n, ok := t.advance(src)
|
||||||
|
switch {
|
||||||
|
case !ok:
|
||||||
|
err = ErrInvalid
|
||||||
|
case n < len(src):
|
||||||
|
if !atEOF {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
err = ErrInvalid
|
||||||
|
case !t.isFinal():
|
||||||
|
err = ErrInvalid
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precomputing the ASCII values decreases running time for the ASCII fast path
|
||||||
|
// by about 30%.
|
||||||
|
var asciiTable [128]bidi.Properties
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
for i := range asciiTable {
|
||||||
|
p, _ := bidi.LookupRune(rune(i))
|
||||||
|
asciiTable[i] = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Transformer) advance(s []byte) (n int, ok bool) {
|
||||||
|
var e bidi.Properties
|
||||||
|
var sz int
|
||||||
|
for n < len(s) {
|
||||||
|
if s[n] < utf8.RuneSelf {
|
||||||
|
e, sz = asciiTable[s[n]], 1
|
||||||
|
} else {
|
||||||
|
e, sz = bidi.Lookup(s[n:])
|
||||||
|
if sz <= 1 {
|
||||||
|
if sz == 1 {
|
||||||
|
// We always consider invalid UTF-8 to be invalid, even if
|
||||||
|
// the string has not yet been determined to be RTL.
|
||||||
|
// TODO: is this correct?
|
||||||
|
return n, false
|
||||||
|
}
|
||||||
|
return n, true // incomplete UTF-8 encoding
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// TODO: using CompactClass would result in noticeable speedup.
|
||||||
|
// See unicode/bidi/prop.go:Properties.CompactClass.
|
||||||
|
c := uint16(1 << e.Class())
|
||||||
|
t.seen |= c
|
||||||
|
if t.seen&exclusiveRTL == exclusiveRTL {
|
||||||
|
t.state = ruleInvalid
|
||||||
|
return n, false
|
||||||
|
}
|
||||||
|
switch tr := transitions[t.state]; {
|
||||||
|
case tr[0].mask&c != 0:
|
||||||
|
t.state = tr[0].next
|
||||||
|
case tr[1].mask&c != 0:
|
||||||
|
t.state = tr[1].next
|
||||||
|
default:
|
||||||
|
t.state = ruleInvalid
|
||||||
|
if t.isRTL() {
|
||||||
|
return n, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n += sz
|
||||||
|
}
|
||||||
|
return n, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Transformer) advanceString(s string) (n int, ok bool) {
|
||||||
|
var e bidi.Properties
|
||||||
|
var sz int
|
||||||
|
for n < len(s) {
|
||||||
|
if s[n] < utf8.RuneSelf {
|
||||||
|
e, sz = asciiTable[s[n]], 1
|
||||||
|
} else {
|
||||||
|
e, sz = bidi.LookupString(s[n:])
|
||||||
|
if sz <= 1 {
|
||||||
|
if sz == 1 {
|
||||||
|
return n, false // invalid UTF-8
|
||||||
|
}
|
||||||
|
return n, true // incomplete UTF-8 encoding
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// TODO: using CompactClass results in noticeable speedup.
|
||||||
|
// See unicode/bidi/prop.go:Properties.CompactClass.
|
||||||
|
c := uint16(1 << e.Class())
|
||||||
|
t.seen |= c
|
||||||
|
if t.seen&exclusiveRTL == exclusiveRTL {
|
||||||
|
t.state = ruleInvalid
|
||||||
|
return n, false
|
||||||
|
}
|
||||||
|
switch tr := transitions[t.state]; {
|
||||||
|
case tr[0].mask&c != 0:
|
||||||
|
t.state = tr[0].next
|
||||||
|
case tr[1].mask&c != 0:
|
||||||
|
t.state = tr[1].next
|
||||||
|
default:
|
||||||
|
t.state = ruleInvalid
|
||||||
|
if t.isRTL() {
|
||||||
|
return n, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n += sz
|
||||||
|
}
|
||||||
|
return n, true
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.10
|
||||||
|
|
||||||
|
package bidirule
|
||||||
|
|
||||||
|
func (t *Transformer) isFinal() bool {
|
||||||
|
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
|
||||||
|
}
|
|
@ -0,0 +1,14 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.10
|
||||||
|
|
||||||
|
package bidirule
|
||||||
|
|
||||||
|
func (t *Transformer) isFinal() bool {
|
||||||
|
if !t.isRTL() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
|
||||||
|
}
|
|
@ -0,0 +1,198 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:generate go run gen.go gen_trieval.go gen_ranges.go
|
||||||
|
|
||||||
|
// Package bidi contains functionality for bidirectional text support.
|
||||||
|
//
|
||||||
|
// See https://www.unicode.org/reports/tr9.
|
||||||
|
//
|
||||||
|
// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways
|
||||||
|
// and without notice.
|
||||||
|
package bidi // import "golang.org/x/text/unicode/bidi"
|
||||||
|
|
||||||
|
// TODO:
|
||||||
|
// The following functionality would not be hard to implement, but hinges on
|
||||||
|
// the definition of a Segmenter interface. For now this is up to the user.
|
||||||
|
// - Iterate over paragraphs
|
||||||
|
// - Segmenter to iterate over runs directly from a given text.
|
||||||
|
// Also:
|
||||||
|
// - Transformer for reordering?
|
||||||
|
// - Transformer (validator, really) for Bidi Rule.
|
||||||
|
|
||||||
|
// This API tries to avoid dealing with embedding levels for now. Under the hood
|
||||||
|
// these will be computed, but the question is to which extent the user should
|
||||||
|
// know they exist. We should at some point allow the user to specify an
|
||||||
|
// embedding hierarchy, though.
|
||||||
|
|
||||||
|
// A Direction indicates the overall flow of text.
|
||||||
|
type Direction int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// LeftToRight indicates the text contains no right-to-left characters and
|
||||||
|
// that either there are some left-to-right characters or the option
|
||||||
|
// DefaultDirection(LeftToRight) was passed.
|
||||||
|
LeftToRight Direction = iota
|
||||||
|
|
||||||
|
// RightToLeft indicates the text contains no left-to-right characters and
|
||||||
|
// that either there are some right-to-left characters or the option
|
||||||
|
// DefaultDirection(RightToLeft) was passed.
|
||||||
|
RightToLeft
|
||||||
|
|
||||||
|
// Mixed indicates text contains both left-to-right and right-to-left
|
||||||
|
// characters.
|
||||||
|
Mixed
|
||||||
|
|
||||||
|
// Neutral means that text contains no left-to-right and right-to-left
|
||||||
|
// characters and that no default direction has been set.
|
||||||
|
Neutral
|
||||||
|
)
|
||||||
|
|
||||||
|
type options struct{}
|
||||||
|
|
||||||
|
// An Option is an option for Bidi processing.
|
||||||
|
type Option func(*options)
|
||||||
|
|
||||||
|
// ICU allows the user to define embedding levels. This may be used, for example,
|
||||||
|
// to use hierarchical structure of markup languages to define embeddings.
|
||||||
|
// The following option may be a way to expose this functionality in this API.
|
||||||
|
// // LevelFunc sets a function that associates nesting levels with the given text.
|
||||||
|
// // The levels function will be called with monotonically increasing values for p.
|
||||||
|
// func LevelFunc(levels func(p int) int) Option {
|
||||||
|
// panic("unimplemented")
|
||||||
|
// }
|
||||||
|
|
||||||
|
// DefaultDirection sets the default direction for a Paragraph. The direction is
|
||||||
|
// overridden if the text contains directional characters.
|
||||||
|
func DefaultDirection(d Direction) Option {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Paragraph holds a single Paragraph for Bidi processing.
|
||||||
|
type Paragraph struct {
|
||||||
|
// buffers
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBytes configures p for the given paragraph text. It replaces text
|
||||||
|
// previously set by SetBytes or SetString. If b contains a paragraph separator
|
||||||
|
// it will only process the first paragraph and report the number of bytes
|
||||||
|
// consumed from b including this separator. Error may be non-nil if options are
|
||||||
|
// given.
|
||||||
|
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetString configures p for the given paragraph text. It replaces text
|
||||||
|
// previously set by SetBytes or SetString. If b contains a paragraph separator
|
||||||
|
// it will only process the first paragraph and report the number of bytes
|
||||||
|
// consumed from b including this separator. Error may be non-nil if options are
|
||||||
|
// given.
|
||||||
|
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsLeftToRight reports whether the principle direction of rendering for this
|
||||||
|
// paragraphs is left-to-right. If this returns false, the principle direction
|
||||||
|
// of rendering is right-to-left.
|
||||||
|
func (p *Paragraph) IsLeftToRight() bool {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Direction returns the direction of the text of this paragraph.
|
||||||
|
//
|
||||||
|
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
|
||||||
|
func (p *Paragraph) Direction() Direction {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunAt reports the Run at the given position of the input text.
|
||||||
|
//
|
||||||
|
// This method can be used for computing line breaks on paragraphs.
|
||||||
|
func (p *Paragraph) RunAt(pos int) Run {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order computes the visual ordering of all the runs in a Paragraph.
|
||||||
|
func (p *Paragraph) Order() (Ordering, error) {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Line computes the visual ordering of runs for a single line starting and
|
||||||
|
// ending at the given positions in the original text.
|
||||||
|
func (p *Paragraph) Line(start, end int) (Ordering, error) {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Ordering holds the computed visual order of runs of a Paragraph. Calling
|
||||||
|
// SetBytes or SetString on the originating Paragraph invalidates an Ordering.
|
||||||
|
// The methods of an Ordering should only be called by one goroutine at a time.
|
||||||
|
type Ordering struct{}
|
||||||
|
|
||||||
|
// Direction reports the directionality of the runs.
|
||||||
|
//
|
||||||
|
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
|
||||||
|
func (o *Ordering) Direction() Direction {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NumRuns returns the number of runs.
|
||||||
|
func (o *Ordering) NumRuns() int {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run returns the ith run within the ordering.
|
||||||
|
func (o *Ordering) Run(i int) Run {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: perhaps with options.
|
||||||
|
// // Reorder creates a reader that reads the runes in visual order per character.
|
||||||
|
// // Modifiers remain after the runes they modify.
|
||||||
|
// func (l *Runs) Reorder() io.Reader {
|
||||||
|
// panic("unimplemented")
|
||||||
|
// }
|
||||||
|
|
||||||
|
// A Run is a continuous sequence of characters of a single direction.
|
||||||
|
type Run struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the text of the run in its original order.
|
||||||
|
func (r *Run) String() string {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns the text of the run in its original order.
|
||||||
|
func (r *Run) Bytes() []byte {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: methods for
|
||||||
|
// - Display order
|
||||||
|
// - headers and footers
|
||||||
|
// - bracket replacement.
|
||||||
|
|
||||||
|
// Direction reports the direction of the run.
|
||||||
|
func (r *Run) Direction() Direction {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Position of the Run within the text passed to SetBytes or SetString of the
|
||||||
|
// originating Paragraph value.
|
||||||
|
func (r *Run) Pos() (start, end int) {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendReverse reverses the order of characters of in, appends them to out,
|
||||||
|
// and returns the result. Modifiers will still follow the runes they modify.
|
||||||
|
// Brackets are replaced with their counterparts.
|
||||||
|
func AppendReverse(out, in []byte) []byte {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReverseString reverses the order of characters in s and returns a new string.
|
||||||
|
// Modifiers will still follow the runes they modify. Brackets are replaced with
|
||||||
|
// their counterparts.
|
||||||
|
func ReverseString(s string) string {
|
||||||
|
panic("unimplemented")
|
||||||
|
}
|
|
@ -0,0 +1,335 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package bidi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/list"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This file contains a port of the reference implementation of the
|
||||||
|
// Bidi Parentheses Algorithm:
|
||||||
|
// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
|
||||||
|
//
|
||||||
|
// The implementation in this file covers definitions BD14-BD16 and rule N0
|
||||||
|
// of UAX#9.
|
||||||
|
//
|
||||||
|
// Some preprocessing is done for each rune before data is passed to this
|
||||||
|
// algorithm:
|
||||||
|
// - opening and closing brackets are identified
|
||||||
|
// - a bracket pair type, like '(' and ')' is assigned a unique identifier that
|
||||||
|
// is identical for the opening and closing bracket. It is left to do these
|
||||||
|
// mappings.
|
||||||
|
// - The BPA algorithm requires that bracket characters that are canonical
|
||||||
|
// equivalents of each other be able to be substituted for each other.
|
||||||
|
// It is the responsibility of the caller to do this canonicalization.
|
||||||
|
//
|
||||||
|
// In implementing BD16, this implementation departs slightly from the "logical"
|
||||||
|
// algorithm defined in UAX#9. In particular, the stack referenced there
|
||||||
|
// supports operations that go beyond a "basic" stack. An equivalent
|
||||||
|
// implementation based on a linked list is used here.
|
||||||
|
|
||||||
|
// Bidi_Paired_Bracket_Type
|
||||||
|
// BD14. An opening paired bracket is a character whose
|
||||||
|
// Bidi_Paired_Bracket_Type property value is Open.
|
||||||
|
//
|
||||||
|
// BD15. A closing paired bracket is a character whose
|
||||||
|
// Bidi_Paired_Bracket_Type property value is Close.
|
||||||
|
type bracketType byte
|
||||||
|
|
||||||
|
const (
|
||||||
|
bpNone bracketType = iota
|
||||||
|
bpOpen
|
||||||
|
bpClose
|
||||||
|
)
|
||||||
|
|
||||||
|
// bracketPair holds a pair of index values for opening and closing bracket
|
||||||
|
// location of a bracket pair.
|
||||||
|
type bracketPair struct {
|
||||||
|
opener int
|
||||||
|
closer int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bracketPair) String() string {
|
||||||
|
return fmt.Sprintf("(%v, %v)", b.opener, b.closer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// bracketPairs is a slice of bracketPairs with a sort.Interface implementation.
|
||||||
|
type bracketPairs []bracketPair
|
||||||
|
|
||||||
|
func (b bracketPairs) Len() int { return len(b) }
|
||||||
|
func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener }
|
||||||
|
|
||||||
|
// resolvePairedBrackets runs the paired bracket part of the UBA algorithm.
|
||||||
|
//
|
||||||
|
// For each rune, it takes the indexes into the original string, the class the
|
||||||
|
// bracket type (in pairTypes) and the bracket identifier (pairValues). It also
|
||||||
|
// takes the direction type for the start-of-sentence and the embedding level.
|
||||||
|
//
|
||||||
|
// The identifiers for bracket types are the rune of the canonicalized opening
|
||||||
|
// bracket for brackets (open or close) or 0 for runes that are not brackets.
|
||||||
|
func resolvePairedBrackets(s *isolatingRunSequence) {
|
||||||
|
p := bracketPairer{
|
||||||
|
sos: s.sos,
|
||||||
|
openers: list.New(),
|
||||||
|
codesIsolatedRun: s.types,
|
||||||
|
indexes: s.indexes,
|
||||||
|
}
|
||||||
|
dirEmbed := L
|
||||||
|
if s.level&1 != 0 {
|
||||||
|
dirEmbed = R
|
||||||
|
}
|
||||||
|
p.locateBrackets(s.p.pairTypes, s.p.pairValues)
|
||||||
|
p.resolveBrackets(dirEmbed, s.p.initialTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
type bracketPairer struct {
|
||||||
|
sos Class // direction corresponding to start of sequence
|
||||||
|
|
||||||
|
// The following is a restatement of BD 16 using non-algorithmic language.
|
||||||
|
//
|
||||||
|
// A bracket pair is a pair of characters consisting of an opening
|
||||||
|
// paired bracket and a closing paired bracket such that the
|
||||||
|
// Bidi_Paired_Bracket property value of the former equals the latter,
|
||||||
|
// subject to the following constraints.
|
||||||
|
// - both characters of a pair occur in the same isolating run sequence
|
||||||
|
// - the closing character of a pair follows the opening character
|
||||||
|
// - any bracket character can belong at most to one pair, the earliest possible one
|
||||||
|
// - any bracket character not part of a pair is treated like an ordinary character
|
||||||
|
// - pairs may nest properly, but their spans may not overlap otherwise
|
||||||
|
|
||||||
|
// Bracket characters with canonical decompositions are supposed to be
|
||||||
|
// treated as if they had been normalized, to allow normalized and non-
|
||||||
|
// normalized text to give the same result. In this implementation that step
|
||||||
|
// is pushed out to the caller. The caller has to ensure that the pairValue
|
||||||
|
// slices contain the rune of the opening bracket after normalization for
|
||||||
|
// any opening or closing bracket.
|
||||||
|
|
||||||
|
openers *list.List // list of positions for opening brackets
|
||||||
|
|
||||||
|
// bracket pair positions sorted by location of opening bracket
|
||||||
|
pairPositions bracketPairs
|
||||||
|
|
||||||
|
codesIsolatedRun []Class // directional bidi codes for an isolated run
|
||||||
|
indexes []int // array of index values into the original string
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchOpener reports whether characters at given positions form a matching
|
||||||
|
// bracket pair.
|
||||||
|
func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool {
|
||||||
|
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxPairingDepth = 63
|
||||||
|
|
||||||
|
// locateBrackets locates matching bracket pairs according to BD16.
|
||||||
|
//
|
||||||
|
// This implementation uses a linked list instead of a stack, because, while
|
||||||
|
// elements are added at the front (like a push) they are not generally removed
|
||||||
|
// in atomic 'pop' operations, reducing the benefit of the stack archetype.
|
||||||
|
func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) {
|
||||||
|
// traverse the run
|
||||||
|
// do that explicitly (not in a for-each) so we can record position
|
||||||
|
for i, index := range p.indexes {
|
||||||
|
|
||||||
|
// look at the bracket type for each character
|
||||||
|
if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
|
||||||
|
// continue scanning
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch pairTypes[index] {
|
||||||
|
case bpOpen:
|
||||||
|
// check if maximum pairing depth reached
|
||||||
|
if p.openers.Len() == maxPairingDepth {
|
||||||
|
p.openers.Init()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// remember opener location, most recent first
|
||||||
|
p.openers.PushFront(i)
|
||||||
|
|
||||||
|
case bpClose:
|
||||||
|
// see if there is a match
|
||||||
|
count := 0
|
||||||
|
for elem := p.openers.Front(); elem != nil; elem = elem.Next() {
|
||||||
|
count++
|
||||||
|
opener := elem.Value.(int)
|
||||||
|
if p.matchOpener(pairValues, opener, i) {
|
||||||
|
// if the opener matches, add nested pair to the ordered list
|
||||||
|
p.pairPositions = append(p.pairPositions, bracketPair{opener, i})
|
||||||
|
// remove up to and including matched opener
|
||||||
|
for ; count > 0; count-- {
|
||||||
|
p.openers.Remove(p.openers.Front())
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Sort(p.pairPositions)
|
||||||
|
// if we get here, the closing bracket matched no openers
|
||||||
|
// and gets ignored
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bracket pairs within an isolating run sequence are processed as units so
|
||||||
|
// that both the opening and the closing paired bracket in a pair resolve to
|
||||||
|
// the same direction.
|
||||||
|
//
|
||||||
|
// N0. Process bracket pairs in an isolating run sequence sequentially in
|
||||||
|
// the logical order of the text positions of the opening paired brackets
|
||||||
|
// using the logic given below. Within this scope, bidirectional types EN
|
||||||
|
// and AN are treated as R.
|
||||||
|
//
|
||||||
|
// Identify the bracket pairs in the current isolating run sequence
|
||||||
|
// according to BD16. For each bracket-pair element in the list of pairs of
|
||||||
|
// text positions:
|
||||||
|
//
|
||||||
|
// a Inspect the bidirectional types of the characters enclosed within the
|
||||||
|
// bracket pair.
|
||||||
|
//
|
||||||
|
// b If any strong type (either L or R) matching the embedding direction is
|
||||||
|
// found, set the type for both brackets in the pair to match the embedding
|
||||||
|
// direction.
|
||||||
|
//
|
||||||
|
// o [ e ] o -> o e e e o
|
||||||
|
//
|
||||||
|
// o [ o e ] -> o e o e e
|
||||||
|
//
|
||||||
|
// o [ NI e ] -> o e NI e e
|
||||||
|
//
|
||||||
|
// c Otherwise, if a strong type (opposite the embedding direction) is
|
||||||
|
// found, test for adjacent strong types as follows: 1 First, check
|
||||||
|
// backwards before the opening paired bracket until the first strong type
|
||||||
|
// (L, R, or sos) is found. If that first preceding strong type is opposite
|
||||||
|
// the embedding direction, then set the type for both brackets in the pair
|
||||||
|
// to that type. 2 Otherwise, set the type for both brackets in the pair to
|
||||||
|
// the embedding direction.
|
||||||
|
//
|
||||||
|
// o [ o ] e -> o o o o e
|
||||||
|
//
|
||||||
|
// o [ o NI ] o -> o o o NI o o
|
||||||
|
//
|
||||||
|
// e [ o ] o -> e e o e o
|
||||||
|
//
|
||||||
|
// e [ o ] e -> e e o e e
|
||||||
|
//
|
||||||
|
// e ( o [ o ] NI ) e -> e e o o o o NI e e
|
||||||
|
//
|
||||||
|
// d Otherwise, do not set the type for the current bracket pair. Note that
|
||||||
|
// if the enclosed text contains no strong types the paired brackets will
|
||||||
|
// both resolve to the same level when resolved individually using rules N1
|
||||||
|
// and N2.
|
||||||
|
//
|
||||||
|
// e ( NI ) o -> e ( NI ) o
|
||||||
|
|
||||||
|
// getStrongTypeN0 maps character's directional code to strong type as required
|
||||||
|
// by rule N0.
|
||||||
|
//
|
||||||
|
// TODO: have separate type for "strong" directionality.
|
||||||
|
func (p *bracketPairer) getStrongTypeN0(index int) Class {
|
||||||
|
switch p.codesIsolatedRun[index] {
|
||||||
|
// in the scope of N0, number types are treated as R
|
||||||
|
case EN, AN, AL, R:
|
||||||
|
return R
|
||||||
|
case L:
|
||||||
|
return L
|
||||||
|
default:
|
||||||
|
return ON
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// classifyPairContent reports the strong types contained inside a Bracket Pair,
|
||||||
|
// assuming the given embedding direction.
|
||||||
|
//
|
||||||
|
// It returns ON if no strong type is found. If a single strong type is found,
|
||||||
|
// it returns this type. Otherwise it returns the embedding direction.
|
||||||
|
//
|
||||||
|
// TODO: use separate type for "strong" directionality.
|
||||||
|
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
|
||||||
|
dirOpposite := ON
|
||||||
|
for i := loc.opener + 1; i < loc.closer; i++ {
|
||||||
|
dir := p.getStrongTypeN0(i)
|
||||||
|
if dir == ON {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if dir == dirEmbed {
|
||||||
|
return dir // type matching embedding direction found
|
||||||
|
}
|
||||||
|
dirOpposite = dir
|
||||||
|
}
|
||||||
|
// return ON if no strong type found, or class opposite to dirEmbed
|
||||||
|
return dirOpposite
|
||||||
|
}
|
||||||
|
|
||||||
|
// classBeforePair determines which strong types are present before a Bracket
|
||||||
|
// Pair. Return R or L if strong type found, otherwise ON.
|
||||||
|
func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
|
||||||
|
for i := loc.opener - 1; i >= 0; i-- {
|
||||||
|
if dir := p.getStrongTypeN0(i); dir != ON {
|
||||||
|
return dir
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// no strong types found, return sos
|
||||||
|
return p.sos
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignBracketType implements rule N0 for a single bracket pair.
|
||||||
|
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
|
||||||
|
// rule "N0, a", inspect contents of pair
|
||||||
|
dirPair := p.classifyPairContent(loc, dirEmbed)
|
||||||
|
|
||||||
|
// dirPair is now L, R, or N (no strong type found)
|
||||||
|
|
||||||
|
// the following logical tests are performed out of order compared to
|
||||||
|
// the statement of the rules but yield the same results
|
||||||
|
if dirPair == ON {
|
||||||
|
return // case "d" - nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
if dirPair != dirEmbed {
|
||||||
|
// case "c": strong type found, opposite - check before (c.1)
|
||||||
|
dirPair = p.classBeforePair(loc)
|
||||||
|
if dirPair == dirEmbed || dirPair == ON {
|
||||||
|
// no strong opposite type found before - use embedding (c.2)
|
||||||
|
dirPair = dirEmbed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// else: case "b", strong type found matching embedding,
|
||||||
|
// no explicit action needed, as dirPair is already set to embedding
|
||||||
|
// direction
|
||||||
|
|
||||||
|
// set the bracket types to the type found
|
||||||
|
p.setBracketsToType(loc, dirPair, initialTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
|
||||||
|
p.codesIsolatedRun[loc.opener] = dirPair
|
||||||
|
p.codesIsolatedRun[loc.closer] = dirPair
|
||||||
|
|
||||||
|
for i := loc.opener + 1; i < loc.closer; i++ {
|
||||||
|
index := p.indexes[i]
|
||||||
|
if initialTypes[index] != NSM {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.codesIsolatedRun[i] = dirPair
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := loc.closer + 1; i < len(p.indexes); i++ {
|
||||||
|
index := p.indexes[i]
|
||||||
|
if initialTypes[index] != NSM {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.codesIsolatedRun[i] = dirPair
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveBrackets implements rule N0 for a list of pairs.
|
||||||
|
func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
|
||||||
|
for _, loc := range p.pairPositions {
|
||||||
|
p.assignBracketType(loc, dirEmbed, initialTypes)
|
||||||
|
}
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,206 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package bidi
|
||||||
|
|
||||||
|
import "unicode/utf8"
|
||||||
|
|
||||||
|
// Properties provides access to BiDi properties of runes.
|
||||||
|
type Properties struct {
|
||||||
|
entry uint8
|
||||||
|
last uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
var trie = newBidiTrie(0)
|
||||||
|
|
||||||
|
// TODO: using this for bidirule reduces the running time by about 5%. Consider
|
||||||
|
// if this is worth exposing or if we can find a way to speed up the Class
|
||||||
|
// method.
|
||||||
|
//
|
||||||
|
// // CompactClass is like Class, but maps all of the BiDi control classes
|
||||||
|
// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control.
|
||||||
|
// func (p Properties) CompactClass() Class {
|
||||||
|
// return Class(p.entry & 0x0F)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Class returns the Bidi class for p.
|
||||||
|
func (p Properties) Class() Class {
|
||||||
|
c := Class(p.entry & 0x0F)
|
||||||
|
if c == Control {
|
||||||
|
c = controlByteToClass[p.last&0xF]
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBracket reports whether the rune is a bracket.
|
||||||
|
func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 }
|
||||||
|
|
||||||
|
// IsOpeningBracket reports whether the rune is an opening bracket.
|
||||||
|
// IsBracket must return true.
|
||||||
|
func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 }
|
||||||
|
|
||||||
|
// TODO: find a better API and expose.
|
||||||
|
func (p Properties) reverseBracket(r rune) rune {
|
||||||
|
return xorMasks[p.entry>>xorMaskShift] ^ r
|
||||||
|
}
|
||||||
|
|
||||||
|
var controlByteToClass = [16]Class{
|
||||||
|
0xD: LRO, // U+202D LeftToRightOverride,
|
||||||
|
0xE: RLO, // U+202E RightToLeftOverride,
|
||||||
|
0xA: LRE, // U+202A LeftToRightEmbedding,
|
||||||
|
0xB: RLE, // U+202B RightToLeftEmbedding,
|
||||||
|
0xC: PDF, // U+202C PopDirectionalFormat,
|
||||||
|
0x6: LRI, // U+2066 LeftToRightIsolate,
|
||||||
|
0x7: RLI, // U+2067 RightToLeftIsolate,
|
||||||
|
0x8: FSI, // U+2068 FirstStrongIsolate,
|
||||||
|
0x9: PDI, // U+2069 PopDirectionalIsolate,
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupRune returns properties for r.
|
||||||
|
func LookupRune(r rune) (p Properties, size int) {
|
||||||
|
var buf [4]byte
|
||||||
|
n := utf8.EncodeRune(buf[:], r)
|
||||||
|
return Lookup(buf[:n])
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: these lookup methods are based on the generated trie code. The returned
|
||||||
|
// sizes have slightly different semantics from the generated code, in that it
|
||||||
|
// always returns size==1 for an illegal UTF-8 byte (instead of the length
|
||||||
|
// of the maximum invalid subsequence). Most Transformers, like unicode/norm,
|
||||||
|
// leave invalid UTF-8 untouched, in which case it has performance benefits to
|
||||||
|
// do so (without changing the semantics). Bidi requires the semantics used here
|
||||||
|
// for the bidirule implementation to be compatible with the Go semantics.
|
||||||
|
// They ultimately should perhaps be adopted by all trie implementations, for
|
||||||
|
// convenience sake.
|
||||||
|
// This unrolled code also boosts performance of the secure/bidirule package by
|
||||||
|
// about 30%.
|
||||||
|
// So, to remove this code:
|
||||||
|
// - add option to trie generator to define return type.
|
||||||
|
// - always return 1 byte size for ill-formed UTF-8 runes.
|
||||||
|
|
||||||
|
// Lookup returns properties for the first rune in s and the width in bytes of
|
||||||
|
// its encoding. The size will be 0 if s does not hold enough bytes to complete
|
||||||
|
// the encoding.
|
||||||
|
func Lookup(s []byte) (p Properties, sz int) {
|
||||||
|
c0 := s[0]
|
||||||
|
switch {
|
||||||
|
case c0 < 0x80: // is ASCII
|
||||||
|
return Properties{entry: bidiValues[c0]}, 1
|
||||||
|
case c0 < 0xC2:
|
||||||
|
return Properties{}, 1
|
||||||
|
case c0 < 0xE0: // 2-byte UTF-8
|
||||||
|
if len(s) < 2 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
|
||||||
|
case c0 < 0xF0: // 3-byte UTF-8
|
||||||
|
if len(s) < 3 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o := uint32(i)<<6 + uint32(c1)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c2 := s[2]
|
||||||
|
if c2 < 0x80 || 0xC0 <= c2 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
|
||||||
|
case c0 < 0xF8: // 4-byte UTF-8
|
||||||
|
if len(s) < 4 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o := uint32(i)<<6 + uint32(c1)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c2 := s[2]
|
||||||
|
if c2 < 0x80 || 0xC0 <= c2 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o = uint32(i)<<6 + uint32(c2)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c3 := s[3]
|
||||||
|
if c3 < 0x80 || 0xC0 <= c3 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
|
||||||
|
}
|
||||||
|
// Illegal rune
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupString returns properties for the first rune in s and the width in
|
||||||
|
// bytes of its encoding. The size will be 0 if s does not hold enough bytes to
|
||||||
|
// complete the encoding.
|
||||||
|
func LookupString(s string) (p Properties, sz int) {
|
||||||
|
c0 := s[0]
|
||||||
|
switch {
|
||||||
|
case c0 < 0x80: // is ASCII
|
||||||
|
return Properties{entry: bidiValues[c0]}, 1
|
||||||
|
case c0 < 0xC2:
|
||||||
|
return Properties{}, 1
|
||||||
|
case c0 < 0xE0: // 2-byte UTF-8
|
||||||
|
if len(s) < 2 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
|
||||||
|
case c0 < 0xF0: // 3-byte UTF-8
|
||||||
|
if len(s) < 3 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o := uint32(i)<<6 + uint32(c1)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c2 := s[2]
|
||||||
|
if c2 < 0x80 || 0xC0 <= c2 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
|
||||||
|
case c0 < 0xF8: // 4-byte UTF-8
|
||||||
|
if len(s) < 4 {
|
||||||
|
return Properties{}, 0
|
||||||
|
}
|
||||||
|
i := bidiIndex[c0]
|
||||||
|
c1 := s[1]
|
||||||
|
if c1 < 0x80 || 0xC0 <= c1 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o := uint32(i)<<6 + uint32(c1)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c2 := s[2]
|
||||||
|
if c2 < 0x80 || 0xC0 <= c2 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
o = uint32(i)<<6 + uint32(c2)
|
||||||
|
i = bidiIndex[o]
|
||||||
|
c3 := s[3]
|
||||||
|
if c3 < 0x80 || 0xC0 <= c3 {
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
||||||
|
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
|
||||||
|
}
|
||||||
|
// Illegal rune
|
||||||
|
return Properties{}, 1
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,60 @@
|
||||||
|
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||||
|
|
||||||
|
package bidi
|
||||||
|
|
||||||
|
// Class is the Unicode BiDi class. Each rune has a single class.
|
||||||
|
type Class uint
|
||||||
|
|
||||||
|
const (
|
||||||
|
L Class = iota // LeftToRight
|
||||||
|
R // RightToLeft
|
||||||
|
EN // EuropeanNumber
|
||||||
|
ES // EuropeanSeparator
|
||||||
|
ET // EuropeanTerminator
|
||||||
|
AN // ArabicNumber
|
||||||
|
CS // CommonSeparator
|
||||||
|
B // ParagraphSeparator
|
||||||
|
S // SegmentSeparator
|
||||||
|
WS // WhiteSpace
|
||||||
|
ON // OtherNeutral
|
||||||
|
BN // BoundaryNeutral
|
||||||
|
NSM // NonspacingMark
|
||||||
|
AL // ArabicLetter
|
||||||
|
Control // Control LRO - PDI
|
||||||
|
|
||||||
|
numClass
|
||||||
|
|
||||||
|
LRO // LeftToRightOverride
|
||||||
|
RLO // RightToLeftOverride
|
||||||
|
LRE // LeftToRightEmbedding
|
||||||
|
RLE // RightToLeftEmbedding
|
||||||
|
PDF // PopDirectionalFormat
|
||||||
|
LRI // LeftToRightIsolate
|
||||||
|
RLI // RightToLeftIsolate
|
||||||
|
FSI // FirstStrongIsolate
|
||||||
|
PDI // PopDirectionalIsolate
|
||||||
|
|
||||||
|
unknownClass = ^Class(0)
|
||||||
|
)
|
||||||
|
|
||||||
|
var controlToClass = map[rune]Class{
|
||||||
|
0x202D: LRO, // LeftToRightOverride,
|
||||||
|
0x202E: RLO, // RightToLeftOverride,
|
||||||
|
0x202A: LRE, // LeftToRightEmbedding,
|
||||||
|
0x202B: RLE, // RightToLeftEmbedding,
|
||||||
|
0x202C: PDF, // PopDirectionalFormat,
|
||||||
|
0x2066: LRI, // LeftToRightIsolate,
|
||||||
|
0x2067: RLI, // RightToLeftIsolate,
|
||||||
|
0x2068: FSI, // FirstStrongIsolate,
|
||||||
|
0x2069: PDI, // PopDirectionalIsolate,
|
||||||
|
}
|
||||||
|
|
||||||
|
// A trie entry has the following bits:
|
||||||
|
// 7..5 XOR mask for brackets
|
||||||
|
// 4 1: Bracket open, 0: Bracket close
|
||||||
|
// 3..0 Class type
|
||||||
|
|
||||||
|
const (
|
||||||
|
openMask = 0x10
|
||||||
|
xorMaskShift = 5
|
||||||
|
)
|
|
@ -3,16 +3,16 @@
|
||||||
"ignore": "test",
|
"ignore": "test",
|
||||||
"package": [
|
"package": [
|
||||||
{
|
{
|
||||||
"checksumSHA1": "z+M6FYl9EKsoZZMLcT0Ktwfk8pI=",
|
"checksumSHA1": "oKluEwtvvBjw/U7HuxJerq9zM6c=",
|
||||||
"path": "github.com/Azure/azure-pipeline-go/pipeline",
|
"path": "github.com/Azure/azure-pipeline-go/pipeline",
|
||||||
"revision": "7571e8eb0876932ab505918ff7ed5107773e5ee2",
|
"revision": "232aee85e8e3a6223a11c0943f7df2ae0fac00e4",
|
||||||
"revisionTime": "2018-06-07T21:19:23Z"
|
"revisionTime": "2019-07-08T20:54:13Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "5nsGu77r69lloEWbFhMof2UA9rY=",
|
"checksumSHA1": "w+q8h00Uc7cHkEGeb8KJa6LjGBw=",
|
||||||
"path": "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob",
|
"path": "github.com/Azure/azure-storage-blob-go/azblob",
|
||||||
"revision": "eaae161d9d5e07363f04ddb19d84d57efc66d1a1",
|
"revision": "3efca72bd11c050222deab57e25ea90df03b9692",
|
||||||
"revisionTime": "2018-07-12T00:56:34Z"
|
"revisionTime": "2019-06-28T22:37:48Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "QC55lHNOv1+UAL2xtIHw17MJ8J8=",
|
"checksumSHA1": "QC55lHNOv1+UAL2xtIHw17MJ8J8=",
|
||||||
|
@ -262,6 +262,12 @@
|
||||||
"revisionTime": "2018-03-10T13:32:14Z",
|
"revisionTime": "2018-03-10T13:32:14Z",
|
||||||
"version": "efa589957cd060542a26d2dd7832fd6a6c6c3ade"
|
"version": "efa589957cd060542a26d2dd7832fd6a6c6c3ade"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "hkwfDRrenCt8GFFKMWtweohbWa0=",
|
||||||
|
"path": "github.com/mattn/go-ieproxy",
|
||||||
|
"revision": "6dee0af9227d0863f1508ce7937af3396d6605c1",
|
||||||
|
"revisionTime": "2019-07-02T01:03:15Z"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "GiVgQkx5acnq+JZtYiuHPlhHoso=",
|
"checksumSHA1": "GiVgQkx5acnq+JZtYiuHPlhHoso=",
|
||||||
"path": "github.com/mattn/go-isatty",
|
"path": "github.com/mattn/go-isatty",
|
||||||
|
@ -670,6 +676,18 @@
|
||||||
"revision": "eb5bcb51f2a31c7d5141d810b70815c05d9c9146",
|
"revision": "eb5bcb51f2a31c7d5141d810b70815c05d9c9146",
|
||||||
"revisionTime": "2019-04-03T01:06:53Z"
|
"revisionTime": "2019-04-03T01:06:53Z"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "yNR4RokwDc+lYbCL/yOB3ENuKH8=",
|
||||||
|
"path": "golang.org/x/net/http/httpproxy",
|
||||||
|
"revision": "da137c7871d730100384dbcf36e6f8fa493aef5b",
|
||||||
|
"revisionTime": "2019-06-28T18:40:41Z"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "vL6l4FZWitsxht0uqA/GpDNkNNc=",
|
||||||
|
"path": "golang.org/x/net/idna",
|
||||||
|
"revision": "da137c7871d730100384dbcf36e6f8fa493aef5b",
|
||||||
|
"revisionTime": "2019-06-28T18:40:41Z"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "F+tqxPGFt5x7DKZakbbMmENX1oQ=",
|
"checksumSHA1": "F+tqxPGFt5x7DKZakbbMmENX1oQ=",
|
||||||
"path": "golang.org/x/net/websocket",
|
"path": "golang.org/x/net/websocket",
|
||||||
|
@ -700,6 +718,12 @@
|
||||||
"revision": "e77772198cdc3dbfcf7f2de96630204df9fd3a0b",
|
"revision": "e77772198cdc3dbfcf7f2de96630204df9fd3a0b",
|
||||||
"revisionTime": "2019-02-14T21:28:15Z"
|
"revisionTime": "2019-02-14T21:28:15Z"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "FCBHX83YaM1LmNHtSM30BKmbJQY=",
|
||||||
|
"path": "golang.org/x/sys/windows/registry",
|
||||||
|
"revision": "fae7ac547cb717d141c433a2a173315e216b64c4",
|
||||||
|
"revisionTime": "2019-07-11T09:16:12Z"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "tqqo7DEeFCclb58XbN44WwdpWww=",
|
"checksumSHA1": "tqqo7DEeFCclb58XbN44WwdpWww=",
|
||||||
"path": "golang.org/x/text/encoding",
|
"path": "golang.org/x/text/encoding",
|
||||||
|
@ -796,12 +820,24 @@
|
||||||
"revision": "e3703dcdd614d2d7488fff034c75c551ea25da95",
|
"revision": "e3703dcdd614d2d7488fff034c75c551ea25da95",
|
||||||
"revisionTime": "2018-12-15T16:57:46Z"
|
"revisionTime": "2018-12-15T16:57:46Z"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "CbpjEkkOeh0fdM/V8xKDdI0AA88=",
|
||||||
|
"path": "golang.org/x/text/secure/bidirule",
|
||||||
|
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
|
||||||
|
"revisionTime": "2018-12-15T17:52:45Z"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "o3YChxWLvyCmkAn/ZNBj9HC9zKw=",
|
"checksumSHA1": "o3YChxWLvyCmkAn/ZNBj9HC9zKw=",
|
||||||
"path": "golang.org/x/text/transform",
|
"path": "golang.org/x/text/transform",
|
||||||
"revision": "e3703dcdd614d2d7488fff034c75c551ea25da95",
|
"revision": "e3703dcdd614d2d7488fff034c75c551ea25da95",
|
||||||
"revisionTime": "2018-12-15T16:57:46Z"
|
"revisionTime": "2018-12-15T16:57:46Z"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "vv9EDuekZgHxFbh+0jJhB7jLZXY=",
|
||||||
|
"path": "golang.org/x/text/unicode/bidi",
|
||||||
|
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
|
||||||
|
"revisionTime": "2018-12-15T17:52:45Z"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "lBB8oUHgIK0RUuDchkQVfMXJQh0=",
|
"checksumSHA1": "lBB8oUHgIK0RUuDchkQVfMXJQh0=",
|
||||||
"path": "golang.org/x/text/unicode/norm",
|
"path": "golang.org/x/text/unicode/norm",
|
||||||
|
|
Loading…
Reference in New Issue