internal, vendor: update Azure blobstore API

Péter Szilágyi 2018-07-25 17:51:24 +03:00
parent 7abedf9bbb
commit 6b232ce325
No known key found for this signature in database
GPG Key ID: E9AE538CEDF8293D
99 changed files with 14834 additions and 8143 deletions

build/ci.go

@@ -1018,23 +1018,14 @@ func doPurge(cmdline []string) {
 	}
 	for i := 0; i < len(blobs); i++ {
 		for j := i + 1; j < len(blobs); j++ {
-			iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
-			if err != nil {
-				log.Fatal(err)
-			}
-			jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
-			if err != nil {
-				log.Fatal(err)
-			}
-			if iTime.After(jTime) {
+			if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
 				blobs[i], blobs[j] = blobs[j], blobs[i]
 			}
 		}
 	}
 	// Filter out all archives more recent than the given threshold
 	for i, blob := range blobs {
-		timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
-		if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
+		if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
 			blobs = blobs[:i]
 			break
 		}
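With LastModified now exposed as a time.Time rather than an RFC 1123 string, the pairwise comparison above no longer needs to parse (or fatally abort on) timestamps. As a side note, the same ordering could be expressed with the standard library's sort.Slice; a minimal sketch, not part of this commit, assuming "sort" is imported:

	sort.Slice(blobs, func(i, j int) bool {
		return blobs[i].Properties.LastModified.Before(blobs[j].Properties.LastModified)
	})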

internal/build/azure.go

@@ -17,10 +17,12 @@
 package build
 
 import (
+	"context"
 	"fmt"
+	"net/url"
 	"os"
 
-	storage "github.com/Azure/azure-storage-go"
+	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
 )
 
 // AzureBlobstoreConfig is an authentication and configuration struct containing
@@ -43,11 +45,14 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
 		return nil
 	}
 	// Create an authenticated client against the Azure cloud
-	rawClient, err := storage.NewBasicClient(config.Account, config.Token)
-	if err != nil {
-		return err
-	}
-	client := rawClient.GetBlobService()
+	credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
+	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
+	service := azblob.NewServiceURL(*u, pipeline)
+	container := service.NewContainerURL(config.Container)
+	blockblob := container.NewBlockBlobURL(name)
 
 	// Stream the file to upload into the designated blobstore container
 	in, err := os.Open(path)
// Stream the file to upload into the designated blobstore container
in, err := os.Open(path)
@@ -56,38 +61,33 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
 	}
 	defer in.Close()
 
-	info, err := in.Stat()
-	if err != nil {
-		return err
-	}
-	return client.CreateBlockBlobFromReader(config.Container, name, uint64(info.Size()), in, nil)
+	_, err = blockblob.Upload(context.Background(), in, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+	return err
 }
 
 // AzureBlobstoreList lists all the files contained within an azure blobstore.
-func AzureBlobstoreList(config AzureBlobstoreConfig) ([]storage.Blob, error) {
-	// Create an authenticated client against the Azure cloud
-	rawClient, err := storage.NewBasicClient(config.Account, config.Token)
-	if err != nil {
-		return nil, err
-	}
-	client := rawClient.GetBlobService()
+func AzureBlobstoreList(config AzureBlobstoreConfig) ([]azblob.BlobItem, error) {
+	credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
+	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
+	service := azblob.NewServiceURL(*u, pipeline)
 
 	// List all the blobs from the container and return them
-	container := client.GetContainerReference(config.Container)
+	container := service.NewContainerURL(config.Container)
 
-	blobs, err := container.ListBlobs(storage.ListBlobsParameters{
+	res, err := container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{
 		MaxResults: 1024 * 1024 * 1024, // Yes, fetch all of them
-		Timeout:    3600,               // Yes, wait for all of them
 	})
 	if err != nil {
 		return nil, err
 	}
-	return blobs.Blobs, nil
+	return res.Segment.BlobItems, nil
 }
 
 // AzureBlobstoreDelete iterates over a list of files to delete and removes them
 // from the blobstore.
-func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []storage.Blob) error {
+func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []azblob.BlobItem) error {
 	if *DryRunFlag {
 		for _, blob := range blobs {
 			fmt.Printf("would delete %s (%s) from %s/%s\n", blob.Name, blob.Properties.LastModified, config.Account, config.Container)
@@ -95,15 +95,18 @@ func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []storage.Blob) err
 		return nil
 	}
 	// Create an authenticated client against the Azure cloud
-	rawClient, err := storage.NewBasicClient(config.Account, config.Token)
-	if err != nil {
-		return err
-	}
-	client := rawClient.GetBlobService()
+	credential := azblob.NewSharedKeyCredential(config.Account, config.Token)
+	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", config.Account))
+	service := azblob.NewServiceURL(*u, pipeline)
+	container := service.NewContainerURL(config.Container)
 
 	// Iterate over the blobs and delete them
 	for _, blob := range blobs {
-		if err := client.DeleteBlob(config.Container, blob.Name, nil); err != nil {
+		blockblob := container.NewBlockBlobURL(blob.Name)
+		if _, err := blockblob.Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
 			return err
 		}
 	}
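All three rewritten helpers address blobs through the same credential, pipeline, service URL, container URL chain. Note that AzureBlobstoreList deliberately fetches everything in one oversized segment; the 2018-03-28 azblob API also supports marker-based paging, roughly as in the sketch below (an illustration of the API, not what the commit does; listAll is a hypothetical helper):

	func listAll(ctx context.Context, container azblob.ContainerURL) ([]azblob.BlobItem, error) {
		var items []azblob.BlobItem
		for marker := (azblob.Marker{}); marker.NotDone(); {
			res, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
			if err != nil {
				return nil, err
			}
			items = append(items, res.Segment.BlobItems...)
			marker = res.NextMarker // advance to the next page of results
		}
		return items, nil
	}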

vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go

@@ -0,0 +1,255 @@
package pipeline
import (
"context"
"net"
"net/http"
"os"
"time"
)
// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
// requires that this Factory create a new instance of its Policy object.
type Factory interface {
New(next Policy, po *PolicyOptions) Policy
}
// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc
// New calls f(next,po).
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
return f(next, po)
}
// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
// Response goes backward through the linked-list for additional processing.
// NOTE: Request is passed by value so changes do not change the caller's version of
// the request. However, Request has some fields that reference mutable objects (not strings).
// These references are copied; a deep copy is not performed. Specifically, this means that
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
type Policy interface {
Do(ctx context.Context, request Request) (Response, error)
}
// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
type PolicyFunc func(ctx context.Context, request Request) (Response, error)
// Do calls f(ctx, request).
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
return f(ctx, request)
}
// Options configures a Pipeline's behavior.
type Options struct {
HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
Log LogOptions
}
// LogLevel tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LogLevel uint32
const (
// LogNone tells a logger not to log any entries passed to it.
LogNone LogLevel = iota
// LogFatal tells a logger to log all LogFatal entries passed to it.
LogFatal
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
LogPanic
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
LogError
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogWarning
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogInfo
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogDebug
)
// LogOptions configures the pipeline's logging mechanism & level filtering.
type LogOptions struct {
Log func(level LogLevel, message string)
// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
// An application can return different values over its lifetime; this allows the application to dynamically
// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
// Usually, the function will be implemented simply like this: return level <= LogWarning
ShouldLog func(level LogLevel) bool
}
type pipeline struct {
factories []Factory
options Options
}
// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
//
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
type Pipeline interface {
Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
}
// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
func NewPipeline(factories []Factory, o Options) Pipeline {
if o.HTTPSender == nil {
o.HTTPSender = newDefaultHTTPClientFactory()
}
if o.Log.Log == nil {
o.Log.Log = func(LogLevel, string) {} // No-op logger
}
return &pipeline{factories: factories, options: o}
}
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
// ultimately sends the transformed HTTP request over the network.
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
response, err := p.newPolicies(methodFactory).Do(ctx, request)
request.close()
return response, err
}
func (p *pipeline) newPolicies(methodFactory Factory) Policy {
// The last Policy is the one that actually sends the request over the wire and gets the response.
// It is overridable via the Options' HTTPSender field.
po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
next := p.options.HTTPSender.New(nil, po)
// Walk over the slice of Factory objects in reverse (from wire to API)
markers := 0
for i := len(p.factories) - 1; i >= 0; i-- {
factory := p.factories[i]
if _, ok := factory.(methodFactoryMarker); ok {
markers++
if markers > 1 {
panic("MethodFactoryMarker can only appear once in the pipeline")
}
if methodFactory != nil {
// Replace MethodFactoryMarker with passed-in methodFactory
next = methodFactory.New(next, po)
}
} else {
// Use the slice's Factory to construct its Policy
next = factory.New(next, po)
}
}
// Each Factory has created its Policy
if markers == 0 && methodFactory != nil {
panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
}
return next // Return head of the Policy object linked-list
}
// A PolicyOptions represents optional information that can be used by a node in the
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
// uses the options to perform logging. But, in the future, this could be used for more.
type PolicyOptions struct {
pipeline *pipeline
}
// ShouldLog returns true if the specified log level should be logged.
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
if po.pipeline.options.Log.ShouldLog != nil {
return po.pipeline.options.Log.ShouldLog(level)
}
return false
}
// Log logs a string to the Pipeline's Logger.
func (po *PolicyOptions) Log(level LogLevel, msg string) {
if !po.ShouldLog(level) {
return // Short circuit message formatting if we're not logging it
}
// We are logging it, ensure trailing newline
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
msg += "\n" // Ensure trailing newline
}
po.pipeline.options.Log.Log(level, msg)
// If logger doesn't handle fatal/panic, we'll do it here.
if level == LogFatal {
os.Exit(1)
} else if level == LogPanic {
panic(msg)
}
}
var pipelineHTTPClient = newDefaultHTTPClient()
func newDefaultHTTPClient() *http.Client {
// We want the Transport to have a large connection pool
return &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
Dial /*Context*/ : (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).Dial, /*Context*/
MaxIdleConns: 0, // No limit
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: false,
DisableCompression: false,
MaxResponseHeaderBytes: 0,
//ResponseHeaderTimeout: time.Duration{},
//ExpectContinueTimeout: time.Duration{},
},
}
}
// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client.
func newDefaultHTTPClientFactory() Factory {
return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
return func(ctx context.Context, request Request) (Response, error) {
r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
if err != nil {
err = NewError(err, "HTTP request failed")
}
return NewHTTPResponse(r), err
}
})
}
var mfm = methodFactoryMarker{} // Singleton
// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
func MethodFactoryMarker() Factory {
return mfm
}
type methodFactoryMarker struct {
}
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
panic("methodFactoryMarker policy should have been replaced with a method policy")
}

vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go

@@ -0,0 +1,33 @@
// +build !windows,!nacl,!plan9
package pipeline
import (
"log"
"log/syslog"
)
// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux)
func ForceLog(level LogLevel, msg string) {
if defaultLogger == nil {
return // Return fast if we failed to create the logger.
}
// We are logging it, ensure trailing newline
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
msg += "\n" // Ensure trailing newline
}
switch level {
case LogFatal:
defaultLogger.Fatal(msg)
case LogPanic:
defaultLogger.Panic(msg)
case LogError, LogWarning, LogInfo:
defaultLogger.Print(msg)
}
}
var defaultLogger = func() *log.Logger {
l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
return l
}()

vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go

@@ -0,0 +1,61 @@
package pipeline
import (
"os"
"syscall"
"unsafe"
)
// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux)
func ForceLog(level LogLevel, msg string) {
var el eventType
switch level {
case LogError, LogFatal, LogPanic:
el = elError
case LogWarning:
el = elWarning
case LogInfo:
el = elInfo
}
// We are logging it, ensure trailing newline
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
msg += "\n" // Ensure trailing newline
}
reportEvent(el, 0, msg)
}
type eventType int16
const (
elSuccess eventType = 0
elError eventType = 1
elWarning eventType = 2
elInfo eventType = 4
)
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
advAPI32 := syscall.MustLoadDLL("AdvAPI32.dll")
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
sourceName, _ := os.Executable()
sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
if lastErr == nil { // On error, logging is a no-op
return func(eventType eventType, eventID int32, msg string) {}
}
reportEvent := advAPI32.MustFindProc("ReportEventW")
return func(eventType eventType, eventID int32, msg string) {
s, _ := syscall.UTF16PtrFromString(msg)
_, _, _ = reportEvent.Call(
uintptr(handle), // HANDLE hEventLog
uintptr(eventType), // WORD wType
uintptr(0), // WORD wCategory
uintptr(eventID), // DWORD dwEventID
uintptr(0), // PSID lpUserSid
uintptr(1), // WORD wNumStrings
uintptr(0), // DWORD dwDataSize
uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
uintptr(0)) // LPVOID lpRawData
}
}()

vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go

@@ -0,0 +1,161 @@
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package pipeline implements an HTTP request/response middleware pipeline whose
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
the request is sent over the wire.
Not all policy objects mutate an HTTP request; some policy objects simply impact the
flow of requests/responses by performing operations such as logging, retry policies,
timeouts, failure injection, and deserialization of response payloads.
Implementing the Policy Interface
To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
object sends the HTTP request over the network (by calling the HTTPSender's Do method).
When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
to the code that initiated the original HTTP request.
Here is a template for how to define a pipeline.Policy object:
type myPolicy struct {
node PolicyNode
// TODO: Add configuration/setting fields here (if desired)...
}
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
// TODO: Mutate/process the HTTP request here...
response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
// TODO: Mutate/process the HTTP response here...
return response, err // Return response/error to previous Policy
}
Implementing the Factory Interface
Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
Here is a template for how to define a pipeline.Factory object:
// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
type myPolicyFactory struct {
// TODO: Add any configuration/setting fields if desired...
}
func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
}
Using your Factory and Policy objects via a Pipeline
To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
this slice to the pipeline.NewPipeline function.
func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
This function also requires an object implementing the HTTPSender interface. For simple scenarios,
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
or other objects that can simulate the network requests for testing purposes.
Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
context.Context for cancelling the HTTP request (if desired).
type Pipeline interface {
Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
}
Do iterates over the slice of Factory objects and tells each one to create its corresponding
Policy object. After the linked-list of Policy objects have been created, Do calls the first
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
The last Policy object sends the message over the network.
When the network operation completes, the HTTP response and error return values pass
back through the same Policy objects in reverse order. Most Policy objects ignore the
response/error but some log the result, retry the operation (depending on the exact
reason the operation failed), or deserialize the response's body. Your own Policy
objects can do whatever they like when processing outgoing requests or incoming responses.
Note that after an I/O request runs to completion, the Policy objects for that request
are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe allowing
them to be created once and reused over many I/O operations. This allows for efficient use of
memory and also makes them safely usable by multiple goroutines concurrently.
Inserting a Method-Specific Factory into the Linked-List of Policy Objects
While Pipeline and Factory objects can be reused over many different operations, it is
common to have special behavior for a specific operation/method. For example, a method
may need to deserialize the response's body to an instance of a specific data type.
To accommodate this, the Pipeline's Do method takes an additional method-specific
Factory object. The Do method tells this Factory to create a Policy object and
injects this method-specific Policy object into the linked-list of Policy objects.
When creating a Pipeline object, the slice of Factory objects passed must have 1
(and only 1) entry marking where the method-specific Factory should be injected.
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
func MethodFactoryMarker() pipeline.Factory
Creating an HTTP Request Object
The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
To this function, you must pass a pipeline.RequestOptions that looks like this:
type RequestOptions struct {
// The readable and seekable stream to be sent to the server as the request's body.
Body io.ReadSeeker
// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
Progress ProgressReceiver
}
The method and struct ensure that the request's body stream is a read/seekable stream.
A seekable stream is required so that upon retry, the final Policy object can seek
the stream back to the beginning before retrying the network request and re-uploading the
body. In addition, you can associate a ProgressReceiver callback function which will be
invoked periodically to report progress while bytes are being read from the body stream
and sent over the network.
Processing the HTTP Response
When an HTTP response comes in from the network, a reference to Go's http.Response struct is
embedded in a struct that implements the pipeline.Response interface:
type Response interface {
Response() *http.Response
}
This interface is returned through all the Policy objects. Each Policy object can call the Response
interface's Response method to examine (or mutate) the embedded http.Response object.
A Policy object can internally define another struct (implementing the pipeline.Response interface)
that embeds an http.Response and adds additional fields and return this structure to other Policy
objects. This allows a Policy object to deserialize the body to some other struct and return the
original http.Response and the additional struct back through the Policy chain. Other Policy objects
can see the Response but cannot see the additional struct with the deserialized body. After all the
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
The caller of this method can perform a type assertion attempting to get back to the struct type
really returned by the Policy object. If the type assertion is successful, the caller now has
access to both the http.Response and the deserialized struct object.*/
package pipeline
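A compact, runnable sketch of the method-specific injection described above (the header name and URL are placeholders): the marker reserves a slot in the factory slice, and the factory passed to Do fills it for that one call.

	package main

	import (
		"context"
		"net/url"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	func main() {
		// The marker reserves a slot that Do fills with a per-call factory.
		p := pipeline.NewPipeline([]pipeline.Factory{pipeline.MethodFactoryMarker()}, pipeline.Options{})

		// A trivial method-specific policy that stamps a header before sending.
		methodFactory := pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
				request.Header.Set("x-example", "1") // hypothetical header, purely illustrative
				return next.Do(ctx, request)
			}
		})

		u, _ := url.Parse("https://example.com/")
		req, _ := pipeline.NewRequest("GET", *u, nil)
		p.Do(context.Background(), methodFactory, req)
	}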

vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go

@@ -0,0 +1,121 @@
package pipeline
import (
"fmt"
"runtime"
)
type causer interface {
Cause() error
}
// ErrorNode can be an embedded field in a private error object. This field
// adds Program Counter support and a 'cause' (reference to a preceding error).
// When initializing an error type with this embedded field, initialize the
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
type ErrorNode struct {
pc uintptr // Represents a Program Counter that you can get symbols for.
cause error // Refers to the preceding error (or nil)
}
// Error returns a string with the PC's symbols or "" if the PC is invalid.
// When defining a new error type, have its Error method call this one passing
// it the string representation of the error.
func (e *ErrorNode) Error(msg string) string {
s := ""
if fn := runtime.FuncForPC(e.pc); fn != nil {
file, line := fn.FileLine(e.pc)
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
}
s += msg + "\n\n"
if e.cause != nil {
s += e.cause.Error() + "\n"
}
return s
}
// Cause returns the error that preceded this error.
func (e *ErrorNode) Cause() error { return e.cause }
// Temporary returns true if the error occurred due to a temporary condition.
func (e ErrorNode) Temporary() bool {
type temporary interface {
Temporary() bool
}
for err := e.cause; err != nil; {
if t, ok := err.(temporary); ok {
return t.Temporary()
}
if cause, ok := err.(causer); ok {
err = cause.Cause()
} else {
err = nil
}
}
return false
}
// Timeout returns true if the error occurred due to time expiring.
func (e ErrorNode) Timeout() bool {
type timeout interface {
Timeout() bool
}
for err := e.cause; err != nil; {
if t, ok := err.(timeout); ok {
return t.Timeout()
}
if cause, ok := err.(causer); ok {
err = cause.Cause()
} else {
err = nil
}
}
return false
}
// Initialize is used to initialize an embedded ErrorNode field.
// It captures the caller's program counter and saves the cause (preceding error).
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
// value of 3 is very common; but, depending on your code nesting, you may need
// a different value.
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
// Get the PC of Initialize method's caller.
pc := [1]uintptr{}
_ = runtime.Callers(callersToSkip, pc[:])
return ErrorNode{pc: pc[0], cause: cause}
}
// Cause walks all the preceding errors and returns the originating error.
func Cause(err error) error {
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}
// NewError creates a simple string error (like errors.New). But, this
// error also captures the caller's Program Counter and the preceding error.
func NewError(cause error, msg string) error {
return &pcError{
ErrorNode: ErrorNode{}.Initialize(cause, 3),
msg: msg,
}
}
// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
type pcError struct {
ErrorNode
msg string
}
// Error satisfies the error interface. It shows the error with Program Counter
// symbols and calls Error on the preceding error so you can see the full error chain.
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
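The embedding pattern the comments describe looks roughly like the following sketch; statusError and its fields are hypothetical, only the pipeline types and functions are from the file above:

	package main

	import (
		"errors"
		"fmt"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	// statusError embeds pipeline.ErrorNode to pick up PC symbols and a cause chain.
	type statusError struct {
		pipeline.ErrorNode
		code int
	}

	func newStatusError(cause error, code int) error {
		return &statusError{ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), code: code}
	}

	// Error delegates to ErrorNode.Error so caller symbols and the cause are included.
	func (e *statusError) Error() string {
		return e.ErrorNode.Error(fmt.Sprintf("HTTP status %d", e.code))
	}

	func main() {
		err := newStatusError(errors.New("connection reset"), 503)
		fmt.Println(err)                 // message plus caller symbols and cause
		fmt.Println(pipeline.Cause(err)) // walks back to "connection reset"
	}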

vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go

@@ -0,0 +1,82 @@
package pipeline
import "io"
// ********** The following is common between the request body AND the response body.
// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
type ProgressReceiver func(bytesTransferred int64)
// ********** The following are specific to the request body (a ReadSeekCloser)
// This struct is used when sending a body to the network
type requestBodyProgress struct {
requestBody io.ReadSeeker // Seeking is required to support retries
pr ProgressReceiver
}
// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
if pr == nil {
panic("pr must not be nil")
}
return &requestBodyProgress{requestBody: requestBody, pr: pr}
}
// Read reads a block of data from an inner stream and reports progress
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
n, err = rbp.requestBody.Read(p)
if err != nil {
return
}
// Invokes the user's callback method to report progress
position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
if err != nil {
panic(err)
}
rbp.pr(position)
return
}
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
return rbp.requestBody.Seek(offset, whence)
}
// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
func (rbp *requestBodyProgress) Close() error {
if c, ok := rbp.requestBody.(io.Closer); ok {
return c.Close()
}
return nil
}
// ********** The following are specific to the response body (a ReadCloser)
// This struct is used when receiving a body from the network
type responseBodyProgress struct {
responseBody io.ReadCloser
pr ProgressReceiver
offset int64
}
// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
if pr == nil {
panic("pr must not be nil")
}
return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
}
// Read reads a block of data from an inner stream and reports progress
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
n, err = rbp.responseBody.Read(p)
rbp.offset += int64(n)
// Invokes the user's callback method to report progress
rbp.pr(rbp.offset)
return
}
func (rbp *responseBodyProgress) Close() error {
return rbp.responseBody.Close()
}
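Hooking a ProgressReceiver into an upload body is a one-liner; a minimal sketch in which draining the wrapper stands in for the HTTP transport reading the body:

	package main

	import (
		"fmt"
		"io/ioutil"
		"strings"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	func main() {
		body := strings.NewReader("hello, blobstore") // any io.ReadSeeker works
		wrapped := pipeline.NewRequestBodyProgress(body, func(bytesTransferred int64) {
			fmt.Printf("uploaded %d bytes so far\n", bytesTransferred)
		})
		ioutil.ReadAll(wrapped) // each Read reports the current offset to the callback
	}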

vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go

@@ -0,0 +1,147 @@
package pipeline
import (
"io"
"net/http"
"net/url"
"strconv"
)
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
type Request struct {
*http.Request
}
// NewRequest initializes a new HTTP request object with any desired options.
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
request.Request = &http.Request{
Method: method,
URL: &url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: url.Host,
}
if body != nil {
err = request.SetBody(body)
}
return
}
// SetBody sets the body and content length, assumes body is not nil.
func (r Request) SetBody(body io.ReadSeeker) error {
size, err := body.Seek(0, io.SeekEnd)
if err != nil {
return err
}
body.Seek(0, io.SeekStart)
r.ContentLength = size
r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
if size != 0 {
r.Body = &retryableRequestBody{body: body}
r.GetBody = func() (io.ReadCloser, error) {
_, err := body.Seek(0, io.SeekStart)
if err != nil {
return nil, err
}
return r.Body, nil
}
} else {
// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
r.Body = http.NoBody
r.GetBody = func() (io.ReadCloser, error) {
return http.NoBody, nil
}
// close the user-provided empty body
if c, ok := body.(io.Closer); ok {
c.Close()
}
}
return nil
}
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close,
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
func (r Request) Copy() Request {
if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
}
copy := *r.Request // Copy the request
urlCopy := *(r.Request.URL) // Copy the URL
copy.URL = &urlCopy
copy.Header = http.Header{} // Copy the header
for k, vs := range r.Header {
for _, value := range vs {
copy.Header.Add(k, value)
}
}
return Request{Request: &copy} // Return the copy
}
func (r Request) close() error {
if r.Body != nil && r.Body != http.NoBody {
c, ok := r.Body.(*retryableRequestBody)
if !ok {
panic("unexpected request body type (should be *retryableReadSeekerCloser)")
}
return c.realClose()
}
return nil
}
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
func (r Request) RewindBody() error {
if r.Body != nil && r.Body != http.NoBody {
s, ok := r.Body.(io.Seeker)
if !ok {
panic("unexpected request body type (should be io.Seeker)")
}
// Reset the stream back to the beginning
_, err := s.Seek(0, io.SeekStart)
return err
}
return nil
}
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
// This struct is used when sending a body to the network
type retryableRequestBody struct {
body io.ReadSeeker // Seeking is required to support retries
}
// Read reads a block of data from an inner stream and reports progress
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
return b.body.Read(p)
}
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
return b.body.Seek(offset, whence)
}
func (b *retryableRequestBody) Close() error {
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
// The pipeline closes the request body upon success.
return nil
}
func (b *retryableRequestBody) realClose() error {
if c, ok := b.body.(io.Closer); ok {
return c.Close()
}
return nil
}
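The seekable-body contract exists so retries can rewind and resend the same payload; a hedged sketch of that loop (sendWithRetry is hypothetical, the pipeline calls are from the files above):

	package main

	import (
		"context"
		"net/url"
		"strings"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	func sendWithRetry(ctx context.Context, p pipeline.Pipeline, req pipeline.Request, attempts int) (pipeline.Response, error) {
		var resp pipeline.Response
		var err error
		for i := 0; i < attempts; i++ {
			if i > 0 {
				if err = req.RewindBody(); err != nil { // seek the body back to offset 0
					return nil, err
				}
			}
			if resp, err = p.Do(ctx, nil, req); err == nil {
				return resp, nil
			}
		}
		return resp, err
	}

	func main() {
		p := pipeline.NewPipeline(nil, pipeline.Options{})
		u, _ := url.Parse("https://example.invalid/")
		req, _ := pipeline.NewRequest("PUT", *u, strings.NewReader("payload"))
		sendWithRetry(context.Background(), p, req, 3)
	}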

vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go

@@ -0,0 +1,74 @@
package pipeline
import (
"bytes"
"fmt"
"net/http"
"sort"
"strings"
)
// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
// to the expected struct and returns the struct to its caller.
type Response interface {
Response() *http.Response
}
// This is the default struct that has the http.Response.
// A method can replace this struct with its own struct containing an http.Response
// field and any other additional fields.
type httpResponse struct {
response *http.Response
}
// NewHTTPResponse is typically called by a Policy object to return a Response object.
func NewHTTPResponse(response *http.Response) Response {
return &httpResponse{response: response}
}
// This method satisfies the public Response interface's Response method
func (r httpResponse) Response() *http.Response {
return r.response
}
// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
// not nil, then these are also written into the Buffer.
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
// Write the request into the buffer.
fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
writeHeader(b, request.Header)
if response != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
writeHeader(b, response.Header)
}
if err != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
}
}
// writeHeader appends an HTTP request's or response's header into a Buffer.
func writeHeader(b *bytes.Buffer, header map[string][]string) {
if len(header) == 0 {
b.WriteString(" (no headers)\n")
return
}
keys := make([]string, 0, len(header))
// Alphabetize the headers
for k := range header {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// Redact the value of any Authorization header to prevent security information from persisting in logs
value := interface{}("REDACTED")
if !strings.EqualFold(k, "Authorization") {
value = header[k]
}
fmt.Fprintf(b, " %s: %+v\n", k, value)
}
}
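A small usage sketch of the formatting helper, showing the Authorization redaction in action (the request values are placeholders):

	package main

	import (
		"bytes"
		"fmt"
		"net/http"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	func main() {
		req, _ := http.NewRequest("GET", "https://account.blob.core.windows.net/container", nil)
		req.Header.Set("Authorization", "SharedKey account:secret")
		b := &bytes.Buffer{}
		pipeline.WriteRequestWithResponse(b, req, nil, nil)
		fmt.Print(b.String()) // the Authorization value prints as REDACTED
	}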

vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go

@@ -0,0 +1,9 @@
package pipeline
const (
// UserAgent is the string to be used in the user agent string when making requests.
UserAgent = "azure-pipeline-go/" + Version
// Version is the semantic version (see http://semver.org) of the pipeline package.
Version = "0.1.0"
)

vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/access_conditions.go

@@ -0,0 +1,67 @@
package azblob
import (
"time"
)
// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set.
type HTTPAccessConditions struct {
IfModifiedSince time.Time
IfUnmodifiedSince time.Time
IfMatch ETag
IfNoneMatch ETag
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
if !ac.IfModifiedSince.IsZero() {
ims = &ac.IfModifiedSince
}
if !ac.IfUnmodifiedSince.IsZero() {
ius = &ac.IfUnmodifiedSince
}
if ac.IfMatch != ETagNone {
ime = &ac.IfMatch
}
if ac.IfNoneMatch != ETagNone {
inme = &ac.IfNoneMatch
}
return
}
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
HTTPAccessConditions
LeaseAccessConditions
}
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
HTTPAccessConditions
LeaseAccessConditions
AppendBlobAccessConditions
PageBlobAccessConditions
}
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
type LeaseAccessConditions struct {
LeaseID string
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac LeaseAccessConditions) pointers() (leaseID *string) {
if ac.LeaseID != "" {
leaseID = &ac.LeaseID
}
return
}
/*
// getInt32 is for internal infrastructure. It is used with access condition values where
// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
// and the privately-stored field in the access condition object is stored as +1 higher than desired.
// This method returns true if the value is > 0 (explicitly set) and the stored value - 1 (the desired value).
func getInt32(value int32) (bool, int32) {
return value > 0, value - 1
}
*/
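Zero values mean "send no conditional header", so callers only populate the conditions they need; a minimal sketch (the ETag literal is a placeholder):

	package main

	import "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"

	func main() {
		ac := azblob.BlobAccessConditions{
			HTTPAccessConditions: azblob.HTTPAccessConditions{
				IfMatch: azblob.ETag(`"0x8D5"`), // placeholder value: only act if the blob is unchanged
			},
			// LeaseAccessConditions left zero: no lease header is sent.
		}
		_ = ac // pass to Download/Upload/Delete and friends
	}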

vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/atomicmorph.go

@@ -0,0 +1,79 @@
package azblob
import "sync/atomic"
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
// AtomicMorphInt32 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher must not be nil")
}
for {
currentVal := atomic.LoadInt32(target)
desiredVal, morphResult := morpher(currentVal)
if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
return morphResult
}
}
}
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorphUint32 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
// AtomicMorphUint32 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher must not be nil")
}
for {
currentVal := atomic.LoadUint32(target)
desiredVal, morphResult := morpher(currentVal)
if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
return morphResult
}
}
}
// AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
// AtomicMorphInt64 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher must not be nil")
}
for {
currentVal := atomic.LoadInt64(target)
desiredVal, morphResult := morpher(currentVal)
if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
return morphResult
}
}
}
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
// AtomicMorphUint64 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher must not be nil")
}
for {
currentVal := atomic.LoadUint64(target)
desiredVal, morphResult := morpher(currentVal)
if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) {
return morphResult
}
}
}
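A sketch of what the compare-and-swap loop buys you: morph a counter toward a cap and learn, atomically, whether this call claimed a slot (the semaphore use case is illustrative, not from the library):

	package main

	import (
		"fmt"

		"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
	)

	func main() {
		var inFlight int32
		const max = 4
		acquired := azblob.AtomicMorphInt32(&inFlight, func(startVal int32) (int32, interface{}) {
			if startVal >= max {
				return startVal, false // full: leave the value unchanged
			}
			return startVal + 1, true // claim a slot
		}).(bool)
		fmt.Println("slot acquired:", acquired)
	}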

vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/highlevel.go

@@ -0,0 +1,510 @@
package azblob
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"bytes"
"os"
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// CommonResponseHeaders returns the headers common to all blob REST API responses.
type CommonResponse interface {
// ETag returns the value for header ETag.
ETag() ETag
// LastModified returns the value for header Last-Modified.
LastModified() time.Time
// RequestID returns the value for header x-ms-request-id.
RequestID() string
// Date returns the value for header Date.
Date() time.Time
// Version returns the value for header x-ms-version.
Version() string
// Response returns the raw HTTP response object.
Response() *http.Response
}
// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
type UploadToBlockBlobOptions struct {
// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
BlockSize int64
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
Progress pipeline.ProgressReceiver
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
BlobHTTPHeaders BlobHTTPHeaders
// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
Metadata Metadata
// AccessConditions indicates the access conditions for the block blob.
AccessConditions BlobAccessConditions
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
Parallelism uint16
}
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
// Validate parameters and set defaults
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
}
if o.BlockSize == 0 {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
}
size := int64(len(b))
if size <= BlockBlobMaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
var body io.ReadSeeker = bytes.NewReader(b)
if o.Progress != nil {
body = pipeline.NewRequestBodyProgress(body, o.Progress)
}
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
}
var numBlocks = uint16(((size - 1) / o.BlockSize) + 1)
if numBlocks > BlockBlobMaxBlocks {
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
}
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
progress := int64(0)
progressLock := &sync.Mutex{}
err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "UploadBufferToBlockBlob",
transferSize: size,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(offset int64, count int64) error {
// This function is called once per block.
// It is passed this block's offset within the buffer and its count of bytes
// Prepare to read the proper block/section of the buffer
var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
blockNum := offset / o.BlockSize
if o.Progress != nil {
blockProgress := int64(0)
body = pipeline.NewRequestBodyProgress(body,
func(bytesTransferred int64) {
diff := bytesTransferred - blockProgress
blockProgress = bytesTransferred
progressLock.Lock() // 1 goroutine at a time gets a progress report
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions)
return err
},
})
if err != nil {
return nil, err
}
// All put blocks were successful, call Put Block List to finalize the blob
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
}
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
func UploadFileToBlockBlob(ctx context.Context, file *os.File,
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
stat, err := file.Stat()
if err != nil {
return nil, err
}
m := mmf{} // Default to an empty slice; used for 0-size file
if stat.Size() != 0 {
m, err = newMMF(file, false, 0, int(stat.Size()))
if err != nil {
return nil, err
}
defer m.unmap()
}
return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
}
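A hedged usage sketch for the uploader above (account name, key, and URL are placeholders; error handling elided for brevity):

	package main

	import (
		"context"
		"net/url"

		"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
	)

	func main() {
		credential := azblob.NewSharedKeyCredential("account", "base64-key")
		p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
		u, _ := url.Parse("https://account.blob.core.windows.net/container/blob")
		blobURL := azblob.NewBlockBlobURL(*u, p)

		data := []byte("payload")
		azblob.UploadBufferToBlockBlob(context.Background(), data, blobURL, azblob.UploadToBlockBlobOptions{
			BlockSize:   4 * 1024 * 1024, // stage in 4MB blocks when the payload is large
			Parallelism: 4,               // at most 4 blocks in flight
		})
	}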
///////////////////////////////////////////////////////////////////////////////
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
type DownloadFromBlobOptions struct {
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
BlockSize int64
// Progress is a function that is invoked periodically as bytes are received.
Progress pipeline.ProgressReceiver
// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
AccessConditions BlobAccessConditions
// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
Parallelism uint16
// RetryReaderOptionsPerBlock is used when downloading each block.
RetryReaderOptionsPerBlock RetryReaderOptions
}
// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions,
initialDownloadResponse *DownloadResponse) error {
// Validate parameters, and set defaults.
if o.BlockSize < 0 {
panic("BlockSize option must be >= 0")
}
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}
if offset < 0 {
panic("offset option must be >= 0")
}
if count < 0 {
panic("count option must be >= 0")
}
if count == CountToEnd { // If size not specified, calculate it
if initialDownloadResponse != nil {
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
} else {
// If we don't have the length at all, get it
dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false)
if err != nil {
return err
}
count = dr.ContentLength() - offset
}
}
if int64(len(b)) < count {
panic(fmt.Errorf("the buffer's size should be equal to or larger than the request count of bytes: %d", count))
}
// Prepare and do parallel download.
progress := int64(0)
progressLock := &sync.Mutex{}
err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "downloadBlobToBuffer",
transferSize: count,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(chunkStart int64, count int64) error {
dr, err := blobURL.Download(ctx, chunkStart+offset, count, ac, false)
if err != nil {
return err
}
body := dr.Body(o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
body = pipeline.NewResponseBodyProgress(
body,
func(bytesTransferred int64) {
diff := bytesTransferred - rangeProgress
rangeProgress = bytesTransferred
progressLock.Lock()
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
_, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
body.Close()
return err
},
})
if err != nil {
return err
}
return nil
}
// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions) error {
return downloadBlobToBuffer(ctx, blobURL, offset, count, ac, b, o, nil)
}
// DownloadBlobToFile downloads an Azure blob to a local file.
// The file is truncated if its size doesn't match the downloaded size.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error {
// 1. Validate parameters.
if file == nil {
panic("file must not be nil")
}
// 2. Calculate the size of the destination file
var size int64
if count == CountToEnd {
// Try to get the Azure blob's size
props, err := blobURL.GetProperties(ctx, ac)
if err != nil {
return err
}
size = props.ContentLength() - offset
} else {
size = count
}
// 3. Compare and try to resize local file's size if it doesn't match the Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
}
if stat.Size() != size {
if err = file.Truncate(size); err != nil {
return err
}
}
if size > 0 {
// 4. Set mmap and call downloadBlobToBuffer.
m, err := newMMF(file, true, 0, int(size))
if err != nil {
return err
}
defer m.unmap()
return downloadBlobToBuffer(ctx, blobURL, offset, size, ac, m, o, nil)
} else { // if the blob's size is 0, there is no need in downloading it
return nil
}
}
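And the matching download direction, with the same placeholder caveats as the upload sketch:

	package main

	import (
		"context"
		"net/url"
		"os"

		"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
	)

	func main() {
		credential := azblob.NewSharedKeyCredential("account", "base64-key")
		p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
		u, _ := url.Parse("https://account.blob.core.windows.net/container/blob")
		blobURL := azblob.NewBlobURL(*u, p)

		f, _ := os.Create("blob.bin")
		defer f.Close()
		// Offset 0 and CountToEnd fetch the whole blob in parallel chunks.
		azblob.DownloadBlobToFile(context.Background(), blobURL, 0, azblob.CountToEnd,
			azblob.BlobAccessConditions{}, f, azblob.DownloadFromBlobOptions{Parallelism: 4})
	}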
///////////////////////////////////////////////////////////////////////////////
// BatchTransferOptions identifies options used by doBatchTransfer.
type batchTransferOptions struct {
transferSize int64
chunkSize int64
parallelism uint16
operation func(offset int64, chunkSize int64) error
operationName string
}
// doBatchTransfer helps to execute operations in a batch manner.
func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
// Prepare and do parallel operations.
numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1)
operationChannel := make(chan func() error, o.parallelism) // Create the channel that releases 'parallelism' goroutines concurrently
operationResponseChannel := make(chan error, numChunks) // Holds each response
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Create the goroutines that process each operation (in parallel).
if o.parallelism == 0 {
o.parallelism = 5 // default parallelism
}
for g := uint16(0); g < o.parallelism; g++ {
//grIndex := g
go func() {
for f := range operationChannel {
//fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex)
err := f()
operationResponseChannel <- err
//fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex)
}
}()
}
// Add each chunk's operation to the channel.
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
curChunkSize := o.chunkSize
if chunkNum == numChunks-1 { // Last chunk
curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total
}
offset := int64(chunkNum) * o.chunkSize
operationChannel <- func() error {
return o.operation(offset, curChunkSize)
}
}
close(operationChannel)
// Wait for the operations to complete.
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
responseError := <-operationResponseChannel
if responseError != nil {
cancel() // As soon as any operation fails, cancel all remaining operation calls
return responseError // No need to process any more responses
}
}
return nil
}
////////////////////////////////////////////////////////////////////////////////////////////////
type UploadStreamToBlockBlobOptions struct {
BufferSize int
MaxBuffers int
BlobHTTPHeaders BlobHTTPHeaders
Metadata Metadata
AccessConditions BlobAccessConditions
}
func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
result, err := uploadStream(ctx, reader,
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
if err != nil {
return nil, err
}
return result.(CommonResponse), nil
}
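// A client-side sketch of the streaming upload; src can be any io.Reader
// (the buffer sizing below is an arbitrary assumption, not a recommendation).
func uploadFromStream(ctx context.Context, blockBlobURL BlockBlobURL, src io.Reader) error {
_, err := UploadStreamToBlockBlob(ctx, src, blockBlobURL, UploadStreamToBlockBlobOptions{
BufferSize: 4 * 1024 * 1024, // each staged block is 4MB
MaxBuffers: 8, // at most 8 buffers (and upload goroutines) in flight
})
return err
}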
type uploadStreamToBlockBlobOptions struct {
b BlockBlobURL
o UploadStreamToBlockBlobOptions
blockIDPrefix uuid // UUID used with all blockIDs
maxBlockNum uint32 // defaults to 0
firstBlock []byte // Used only if maxBlockNum is 0
}
func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
return nil, nil
}
func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
if num == 0 && len(buffer) < t.o.BufferSize {
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
t.firstBlock = buffer
return nil
}
// Else, upload a staged block...
AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
// Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
if startVal < num {
return num, nil
}
return startVal, nil
})
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{})
return err
}
func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
if t.maxBlockNum == 0 {
// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
}
// Multiple blocks staged, commit them all now
blockID := newUuidBlockID(t.blockIDPrefix)
blockIDs := make([]string, t.maxBlockNum+1)
for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
}
return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
type iTransfer interface {
start(ctx context.Context) (interface{}, error)
chunk(ctx context.Context, num uint32, buffer []byte) error
end(ctx context.Context) (interface{}, error)
}
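// UploadStreamOptions configures the generic uploadStream helper below.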
type UploadStreamOptions struct {
MaxBuffers int
BufferSize int
}
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
defer cancel()
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
type OutgoingMsg struct {
chunkNum uint32
buffer []byte
}
// Create a channel to hold the buffers usable for incoming data
incoming := make(chan []byte, o.MaxBuffers)
outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
if result, err := t.start(ctx); err != nil {
return result, err
}
numBuffers := 0 // The number of buffers & outgoing goroutines created so far
injectBuffer := func() {
// For each Buffer, create it and a goroutine to upload it
incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can read from the reader into it
numBuffers++
go func() {
for outgoingMsg := range outgoing {
// Upload the outgoing buffer
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
wg.Done() // Indicate this buffer was sent
if err != nil {
cancel()
}
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
}
}()
}
injectBuffer() // Create our 1st buffer & outgoing goroutine
// This goroutine grabs a buffer, reads from the stream into the buffer,
// and inserts the buffer into the outgoing channel to be uploaded
for c := uint32(0); true; c++ { // Iterate once per chunk
var buffer []byte
if numBuffers < o.MaxBuffers {
select {
// We're not at max buffers, see if a previously-created buffer is available
case buffer = <-incoming:
break
default:
// No buffer available; inject a new buffer & go routine to process it
injectBuffer()
buffer = <-incoming // Grab the just-injected buffer
}
} else {
// We are at max buffers, block until we get to reuse one
buffer = <-incoming
}
n, err := io.ReadFull(reader, buffer)
if err != nil {
buffer = buffer[:n] // Make slice match the # of read bytes
}
if len(buffer) > 0 {
// Buffer not empty, upload it
wg.Add(1) // We're posting a buffer to be sent
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
}
if err != nil { // The reader is done, no more outgoing buffers
break
}
}
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
wg.Wait() // Wait for all pending outgoing messages to complete
// After all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
}

View File

@ -0,0 +1,111 @@
package azblob
import (
"net/url"
"strings"
)
const (
snapshot = "snapshot"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)
// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
Scheme string // Ex: "https"
Host string // Ex: "account.blob.core.windows.net"
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
}
// NewBlobURLParts parses a URL, initializing the BlobURLParts' fields, including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts {
up := BlobURLParts{
Scheme: u.Scheme,
Host: u.Host,
}
// Find the container & blob names (if any)
if u.Path != "" {
path := u.Path
if path[0] == '/' {
path = path[1:] // If path starts with a slash, remove it
}
// Find the next slash (if it exists)
containerEndIndex := strings.Index(path, "/")
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
up.ContainerName = path
} else {
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash
}
}
// Parse the query parameters; the snapshot parameter is matched case-insensitively
paramsMap := u.Query()
up.Snapshot = "" // Assume no snapshot
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
up.Snapshot = snapshotStr[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, snapshot)
}
up.SAS = newSASQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode()
return up
}
type caseInsensitiveValues url.Values // map[string][]string
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
key = strings.ToLower(key)
for k, v := range values {
if strings.ToLower(k) == key {
return v, true
}
}
return []string{}, false
}
// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() url.URL {
path := ""
// Concatenate container & blob names (if they exist)
if up.ContainerName != "" {
path += "/" + up.ContainerName
if up.BlobName != "" {
path += "/" + up.BlobName
}
}
rawQuery := up.UnparsedParams
// Concatenate blob snapshot query parameter (if it exists)
if up.Snapshot != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += snapshot + "=" + up.Snapshot
}
sas := up.SAS.Encode()
if sas != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += sas
}
u := url.URL{
Scheme: up.Scheme,
Host: up.Host,
Path: path,
RawQuery: rawQuery,
}
return u
}
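// A round-trip sketch: parse an existing blob URL into its parts, point it at
// a snapshot, and rebuild it. The URL and snapshot timestamp are hypothetical.
func snapshotURL(rawURL string, snapshotTime string) (url.URL, error) {
u, err := url.Parse(rawURL) // e.g. "https://account.blob.core.windows.net/container/blob"
if err != nil {
return url.URL{}, err
}
parts := NewBlobURLParts(*u)
parts.Snapshot = snapshotTime // e.g. "2018-03-28T15:04:05.0000000Z", per SnapshotTimeFormat
return parts.URL(), nil
}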

View File

@ -0,0 +1,206 @@
package azblob
import (
"bytes"
"fmt"
"strings"
"time"
)
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
type BlobSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
IPRange IPRange `param:"sip"`
Identifier string `param:"si"`
ContainerName string
BlobName string // Use "" to create a Container SAS
CacheControl string // rscc
ContentDisposition string // rscd
ContentEncoding string // rsce
ContentLanguage string // rscl
ContentType string // rsct
}
// NewSASQueryParameters uses an account's shared key credential to sign these signature values, producing
// the proper SAS query parameters.
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
if sharedKeyCredential == nil {
panic("sharedKeyCredential can't be nil")
}
resource := "c"
if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
}
v.Permissions = perms.String()
} else {
resource = "b"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
}
v.Permissions = perms.String()
}
if v.Version == "" {
v.Version = SASVersion
}
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
stringToSign := strings.Join([]string{
v.Permissions,
startTime,
expiryTime,
getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName),
v.Identifier,
v.IPRange.String(),
string(v.Protocol),
v.Version,
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
v.ContentLanguage, // rscl
v.ContentType}, // rsct
"\n")
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
p := SASQueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Container/Blob-specific SAS parameters
resource: resource,
identifier: v.Identifier,
// Calculated SAS signature
signature: signature,
}
return p
}
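// A sketch of producing a read-only blob SAS and attaching it to a URL via
// BlobURLParts; SASProtocolHTTPS is assumed to be this package's HTTPS-only
// protocol constant, and the container/blob names come from the caller.
func signedBlobURL(credential *SharedKeyCredential, container, blob string) string {
sasParams := BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // assumption: HTTPS-only constant
ExpiryTime: time.Now().UTC().Add(4 * time.Hour),
ContainerName: container,
BlobName: blob,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
parts := BlobURLParts{
Scheme: "https",
Host: credential.AccountName() + ".blob.core.windows.net",
ContainerName: container,
BlobName: blob,
SAS: sasParams,
}
u := parts.URL()
return u.String()
}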
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
func getCanonicalName(account string, containerName string, blobName string) string {
// Container: "/blob/account/containername"
// Blob: "/blob/account/containername/blobname"
elements := []string{"/blob/", account, "/", containerName}
if blobName != "" {
elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
}
return strings.Join(elements, "")
}
// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type ContainerSASPermissions struct {
Read, Add, Create, Write, Delete, List bool
}
// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p ContainerSASPermissions) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
if p.List {
b.WriteRune('l')
}
return b.String()
}
// Parse initializes the ContainerSASPermissions's fields from a string.
func (p *ContainerSASPermissions) Parse(s string) error {
*p = ContainerSASPermissions{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'l':
p.List = true
default:
return fmt.Errorf("Invalid permission: '%v'", r)
}
}
return nil
}
// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }
// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p BlobSASPermissions) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
return b.String()
}
// Parse initializes the BlobSASPermissions's fields from a string.
func (p *BlobSASPermissions) Parse(s string) error {
*p = BlobSASPermissions{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
default:
return fmt.Errorf("Invalid permission: '%v'", r)
}
}
return nil
}
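// String always emits the canonical order (r, a, c, w, d), so a Parse/String
// round trip normalizes the permission string; a quick sketch:
func examplePermissionsRoundTrip() string {
perms := BlobSASPermissions{}
_ = perms.Parse("wr") // sets Write and Read; unknown characters would error
return perms.String() // "rw"
}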

View File

@ -0,0 +1,195 @@
package azblob
// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
// ServiceCode values indicate a service failure.
const (
// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
// ServiceCodeBlobAlreadyExists means the specified blob already exists.
ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
// ServiceCodeBlobNotFound means the specified blob does not exist.
ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"
// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"
// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
// Examine the HTTP status code and message for more information about the failure.
ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"
// ServiceCodeContainerAlreadyExists means the specified container already exists.
ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"
// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"
// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"
// ServiceCodeContainerNotFound means the specified container does not exist.
ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"
// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"
// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"
// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"
// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
// that the operation for AppendBlob requires at least version 2015-02-21.
ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"
// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"
// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"
// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"
// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"
// ServiceCodeInvalidBlockList means the specified block list is invalid.
ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"
// ServiceCodeInvalidOperation means an invalid operation against a blob snapshot.
ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"
// ServiceCodeInvalidPageRange means the page range specified is invalid.
ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"
// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"
// ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL.
ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"
// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"
// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"
// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"
// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"
// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"
// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"
// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"
// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"
// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"
// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"
// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"
// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"
// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"
// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"
// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"
// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"
// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"
// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"
// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"
// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"
// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"
// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"
// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"
// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"
// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"
// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"
// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"
// ServiceCodeSystemInUse means this blob is in use by the system.
ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"
// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"
// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"
// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"
// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"
// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
)
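// These constants are meant to be compared against the service code carried by
// a failed operation's error. A sketch, assuming this package's StorageError
// interface (defined elsewhere in the package) exposes ServiceCode():
func isBlobNotFound(err error) bool {
if stgErr, ok := err.(StorageError); ok {
return stgErr.ServiceCode() == ServiceCodeBlobNotFound
}
return false
}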

View File

@ -0,0 +1,112 @@
package azblob
import (
"context"
"io"
"net/url"
"github.com/Azure/azure-pipeline-go/pipeline"
)
const (
// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
AppendBlobMaxBlocks = 50000
)
// AppendBlobURL defines a set of operations applicable to append blobs.
type AppendBlobURL struct {
BlobURL
abClient appendBlobClient
}
// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
blobClient := newBlobClient(url, p)
abClient := newAppendBlobClient(url, p)
return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
}
// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline.
func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
return NewAppendBlobURL(ab.blobClient.URL(), p)
}
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
p := NewBlobURLParts(ab.URL())
p.Snapshot = snapshot
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
}
// AppendBlock commits a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers()
return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// AppendBlobAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendBlobAccessConditions struct {
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
// only if the append position is equal to a value.
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
IfAppendPositionEqual int64
// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
// only if the append blob's size is less than or equal to a value.
// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
IfMaxSizeLessThanOrEqual int64
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendBlobAccessConditions) pointers() (iape *int64, imsltoe *int64) {
if ac.IfAppendPositionEqual < -1 {
panic("IfAppendPositionEqual can't be less than -1")
}
if ac.IfMaxSizeLessThanOrEqual < -1 {
panic("IfMaxSizeLessThanOrEqual can't be less than -1")
}
var zero int64 // defaults to 0
switch ac.IfAppendPositionEqual {
case -1:
iape = &zero
case 0:
iape = nil
default:
iape = &ac.IfAppendPositionEqual
}
switch ac.IfMaxSizeLessThanOrEqual {
case -1:
imsltoe = &zero
case 0:
imsltoe = nil
default:
imsltoe = &ac.IfMaxSizeLessThanOrEqual
}
return
}
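// A sketch of creating an append blob and appending one block, assuming an
// already-constructed ContainerURL; the blob name is hypothetical and bytes is
// assumed to be imported. AppendBlock requires a seekable stream at position 0,
// which bytes.NewReader provides.
func appendLogLine(ctx context.Context, container ContainerURL, line []byte) error {
ab := container.NewAppendBlobURL("events.log") // hypothetical blob name
// Create overwrites any existing blob with a 0-length append blob; real code
// would tolerate or check for a pre-existing blob.
if _, err := ab.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}); err != nil {
return err
}
_, err := ab.AppendBlock(ctx, bytes.NewReader(line), BlobAccessConditions{})
return err
}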

View File

@ -0,0 +1,222 @@
package azblob
import (
"context"
"net/url"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type BlobURL struct {
blobClient blobClient
}
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
return BlobURL{blobClient: blobClient}
}
// URL returns the URL endpoint used by the BlobURL object.
func (b BlobURL) URL() url.URL {
return b.blobClient.URL()
}
// String returns the URL as a string.
func (b BlobURL) String() string {
u := b.URL()
return u.String()
}
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
return NewBlobURL(b.blobClient.URL(), p)
}
// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
p := NewBlobURLParts(b.URL())
p.Snapshot = snapshot
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
}
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
}
// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
}
// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
func (b BlobURL) ToPageBlobURL() PageBlobURL {
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
}
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool
if rangeGetContentMD5 {
xRangeGetContentMD5 = &rangeGetContentMD5
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
dr, err := b.blobClient.Download(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
if err != nil {
return nil, err
}
return &DownloadResponse{
b: b,
r: dr,
ctx: ctx,
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
}, err
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
return b.blobClient.Undelete(ctx, nil, nil)
}
// SetTier sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
&h.ContentDisposition, nil)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
// performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
}
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.RenewLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
const LeaseBreakNaturally = -1
func leasePeriodPointer(period int32) (p *int32) {
if period != LeaseBreakNaturally {
p = &period
}
return p
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
srcLeaseID := srcac.LeaseAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, srcLeaseID, nil)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
}
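// A download sketch reading the entire blob body; Body(RetryReaderOptions{})
// and io/ioutil are assumptions about this SDK version's response helper and
// the caller's imports, respectively.
func readWholeBlob(ctx context.Context, blobURL BlobURL) ([]byte, error) {
get, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
if err != nil {
return nil, err
}
body := get.Body(RetryReaderOptions{}) // assumption: retrying body wrapper
defer body.Close()
return ioutil.ReadAll(body)
}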

View File

@ -0,0 +1,157 @@
package azblob
import (
"context"
"io"
"net/url"
"encoding/base64"
"encoding/binary"
"github.com/Azure/azure-pipeline-go/pipeline"
)
const (
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000
)
// BlockBlobURL defines a set of operations applicable to block blobs.
type BlockBlobURL struct {
BlobURL
bbClient blockBlobClient
}
// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
bbClient := newBlockBlobClient(url, p)
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
}
// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
return NewBlockBlobURL(bb.blobClient.URL(), p)
}
// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
p := NewBlobURLParts(bb.URL())
p.Snapshot = snapshot
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
sourceURLStr := sourceURL.String()
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, &sourceURLStr, httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
type BlockID [64]byte
func (blockID BlockID) ToBase64() string {
return base64.StdEncoding.EncodeToString(blockID[:])
}
func (blockID *BlockID) FromBase64(s string) error {
*blockID = BlockID{} // Zero out the block ID
_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
return err
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
type uuidBlockID BlockID
func (ubi uuidBlockID) UUID() uuid {
u := uuid{}
copy(u[:], ubi[:len(u)])
return u
}
func (ubi uuidBlockID) Number() uint32 {
return binary.BigEndian.Uint32(ubi[len(uuid{}):])
}
func newUuidBlockID(u uuid) uuidBlockID {
ubi := uuidBlockID{} // Create a new uuidBlockID
copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
// Block number defaults to 0
return ubi
}
func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
copy(ubi[:len(u)], u[:])
return ubi
}
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
return ubi // Return the passed-in copy
}
func (ubi uuidBlockID) ToBase64() string {
return BlockID(ubi).ToBase64()
}
func (ubi *uuidBlockID) FromBase64(s string) error {
return (*BlockID)(ubi).FromBase64(s)
}
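// A sketch of the manual two-phase upload: stage blocks under equal-length
// base64 block IDs, then commit them in order. The parts are hypothetical and
// bytes is assumed to be imported; encoding/binary is already imported above.
func uploadInTwoBlocks(ctx context.Context, bb BlockBlobURL, part0, part1 []byte) error {
// All of a blob's block IDs must decode to the same length; a fixed-size
// BlockID carrying a big-endian counter satisfies that.
id := func(n uint32) string {
b := BlockID{}
binary.BigEndian.PutUint32(b[:4], n)
return b.ToBase64()
}
ids := []string{id(0), id(1)}
if _, err := bb.StageBlock(ctx, ids[0], bytes.NewReader(part0), LeaseAccessConditions{}); err != nil {
return err
}
if _, err := bb.StageBlock(ctx, ids[1], bytes.NewReader(part1), LeaseAccessConditions{}); err != nil {
return err
}
_, err := bb.CommitBlockList(ctx, ids, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
return err
}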

View File

@ -0,0 +1,300 @@
package azblob
import (
"bytes"
"context"
"fmt"
"net/url"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct {
client containerClient
}
// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
if p == nil {
panic("p can't be nil")
}
client := newContainerClient(url, p)
return ContainerURL{client: client}
}
// URL returns the URL endpoint used by the ContainerURL object.
func (c ContainerURL) URL() url.URL {
return c.client.URL()
}
// String returns the URL as a string.
func (c ContainerURL) String() string {
u := c.URL()
return u.String()
}
// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
return NewContainerURL(c.URL(), p)
}
// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
// NewBlobURL method.
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
blobURL := appendToURLPath(c.URL(), blobName)
return NewBlobURL(blobURL, c.client.Pipeline())
}
// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
// NewAppendBlobURL method.
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
blobURL := appendToURLPath(c.URL(), blobName)
return NewAppendBlobURL(blobURL, c.client.Pipeline())
}
// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
// NewBlockBlobURL method.
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
blobURL := appendToURLPath(c.URL(), blobName)
return NewBlockBlobURL(blobURL, c.client.Pipeline())
}
// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
// NewPageBlobURL method.
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
blobURL := appendToURLPath(c.URL(), blobName)
return NewPageBlobURL(blobURL, c.client.Pipeline())
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, nil)
}
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
// This allows us to not expose a GetMetadata method at all, simplifying the API.
return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
}
// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
}
ifModifiedSince, _, _, _ := ac.HTTPAccessConditions.pointers()
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
}
// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
Read, Add, Create, Write, Delete, List bool
}
// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
if p.List {
b.WriteRune('l')
}
return b.String()
}
// Parse initializes the AccessPolicyPermission's fields from a string.
func (p *AccessPolicyPermission) Parse(s string) error {
*p = AccessPolicyPermission{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'l':
p.List = true
default:
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
}
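// An illustrative round-trip (not part of the upstream file): flags are emitted in
// the fixed order r, a, c, w, d, l regardless of how the struct is populated.
//
//	perms := AccessPolicyPermission{Read: true, Write: true, List: true}
//	s := perms.String() // "rwl"
//	var p AccessPolicyPermission
//	_ = p.Parse(s) // p.Read, p.Write, p.List are now true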
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
}
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, nil)
}
// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainerBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
}
// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
prefix, include, maxResults := o.pointers()
return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
}
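// A minimal pagination sketch (assumed caller code, not part of this file): keep
// calling ListBlobsFlatSegment with the previously returned Marker until NotDone
// reports that enumeration is complete.
//
//	for marker := (Marker{}); marker.NotDone(); {
//		segment, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{})
//		if err != nil {
//			return err
//		}
//		marker = segment.NextMarker
//		for _, blob := range segment.Segment.BlobItems {
//			process(blob.Name) // process is a hypothetical callback
//		}
//	}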
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
if o.Details.Snapshots {
panic("snapshots are not supported in this listing operation")
}
prefix, include, maxResults := o.pointers()
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
}
// ListBlobsSegmentOptions defines options available when calling ListBlobsFlatSegment or ListBlobsHierarchySegment.
type ListBlobsSegmentOptions struct {
Details BlobListingDetails // No IncludeType header is produced if ""
Prefix string // No Prefix header is produced if ""
// MaxResults specifies the maximum desired results you want the service to return. Note, the
// service may return fewer results than requested.
// MaxResults=0 means no 'MaxResults' header specified.
MaxResults int32
}
func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
if o.Prefix != "" {
prefix = &o.Prefix
}
include = o.Details.slice()
if o.MaxResults != 0 {
if o.MaxResults < 0 {
panic("MaxResults must be >= 0")
}
maxResults = &o.MaxResults
}
return
}
// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
}
// slice produces the values for the Include query parameter.
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
items := []ListBlobsIncludeItemType{}
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
if d.Copy {
items = append(items, ListBlobsIncludeItemCopy)
}
if d.Deleted {
items = append(items, ListBlobsIncludeItemDeleted)
}
if d.Metadata {
items = append(items, ListBlobsIncludeItemMetadata)
}
if d.Snapshots {
items = append(items, ListBlobsIncludeItemSnapshots)
}
if d.UncommittedBlobs {
items = append(items, ListBlobsIncludeItemUncommittedblobs)
}
return items
}

View File

@ -0,0 +1,236 @@
package azblob
import (
"context"
"fmt"
"io"
"net/url"
"strconv"
"github.com/Azure/azure-pipeline-go/pipeline"
)
const (
// PageBlobPageBytes indicates the number of bytes in a page (512).
PageBlobPageBytes = 512
// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)
// PageBlobURL defines a set of operations applicable to page blobs.
type PageBlobURL struct {
BlobURL
pbClient pageBlobClient
}
// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
pbClient := newPageBlobClient(url, p)
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
}
// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
return NewPageBlobURL(pb.blobClient.URL(), p)
}
// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
p := NewBlobURLParts(pb.URL())
p.Snapshot = snapshot
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
if sequenceNumber < 0 {
panic("sequenceNumber must be greater than or equal to 0")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return pb.pbClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
metadata, ac.LeaseAccessConditions.pointers(),
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must each be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobUploadPagesResponse, error) {
count := validateSeekableStreamAt0AndGetCount(body)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
return pb.pbClient.UploadPages(ctx, body, count, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
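// A hedged usage sketch (caller code, not part of this file): both the offset and
// the length of the stream must be 512-byte aligned, so raw payloads are typically
// padded out to the next page boundary before uploading.
//
//	payload := make([]byte, 2*PageBlobPageBytes) // exactly two pages
//	copy(payload, data)                          // data is an assumed []byte that fits
//	_, err := pageBlobURL.UploadPages(ctx, 0, bytes.NewReader(payload), BlobAccessConditions{})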
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobClearPagesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
return pb.pbClient.ClearPages(ctx, 0, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return pb.pbClient.GetPageRanges(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil)
}
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
if size%PageBlobPageBytes != 0 {
panic("Size must be a multiple of PageBlobPageBytes (512)")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// UpdateSequenceNumber updates the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
if sequenceNumber < 0 {
panic("sequenceNumber must be greater than or equal to 0")
}
sn := &sequenceNumber
if action == SequenceNumberActionIncrement {
sn = nil
}
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
sn, nil)
}
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot and this one are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
qp := source.Query()
qp.Set("snapshot", snapshot)
source.RawQuery = qp.Encode()
return pb.pbClient.CopyIncremental(ctx, source.String(), nil, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
func (pr PageRange) pointers() *string {
if pr.Start < 0 {
panic("PageRange's Start value must be greater than or equal to 0")
}
if pr.End <= 0 {
panic("PageRange's End value must be greater than 0")
}
if pr.Start%PageBlobPageBytes != 0 {
panic("PageRange's Start value must be a multiple of 512")
}
if pr.End%PageBlobPageBytes != (PageBlobPageBytes - 1) {
panic("PageRange's End value must be 1 less than a multiple of 512")
}
if pr.End <= pr.Start {
panic("PageRange's End value must be after the start")
}
endOffset := strconv.FormatInt(int64(pr.End), 10)
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
return &asString
}
// PageBlobAccessConditions identifies page blob-specific access conditions which you optionally set.
type PageBlobAccessConditions struct {
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
// only if the blob's sequence number is less than a value.
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value.
// IfSequenceNumberLessThan=-1 means 'IfSequenceNumberLessThan' header specified with a value of 0.
IfSequenceNumberLessThan int64
// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
// only if the blob's sequence number is less than or equal to a value.
// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value.
// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0.
IfSequenceNumberLessThanOrEqual int64
// IfSequenceNumberEqual ensures that the page blob operation succeeds
// only if the blob's sequence number is equal to a value.
// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value.
// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0.
IfSequenceNumberEqual int64
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
if ac.IfSequenceNumberLessThan < -1 {
panic("Ifsequencenumberlessthan can't be less than -1")
}
if ac.IfSequenceNumberLessThanOrEqual < -1 {
panic("IfSequenceNumberLessThanOrEqual can't be less than -1")
}
if ac.IfSequenceNumberEqual < -1 {
panic("IfSequenceNumberEqual can't be less than -1")
}
var zero int64 // Defaults to 0
switch ac.IfSequenceNumberLessThan {
case -1:
snlt = &zero
case 0:
snlt = nil
default:
snlt = &ac.IfSequenceNumberLessThan
}
switch ac.IfSequenceNumberLessThanOrEqual {
case -1:
snltoe = &zero
case 0:
snltoe = nil
default:
snltoe = &ac.IfSequenceNumberLessThanOrEqual
}
switch ac.IfSequenceNumberEqual {
case -1:
sne = &zero
case 0:
sne = nil
default:
sne = &ac.IfSequenceNumberEqual
}
return
}
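// To make the sentinel encoding above concrete (an illustrative sketch):
//
//	ac := PageBlobAccessConditions{IfSequenceNumberLessThan: -1}
//	snltoe, snlt, sne := ac.pointers()
//	// snlt points at 0 (the -1 sentinel asks for an explicit header value of 0);
//	// snltoe and sne are nil because a field value of 0 means "no header".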

View File

@ -0,0 +1,140 @@
package azblob
import (
"context"
"net/url"
"strings"
"github.com/Azure/azure-pipeline-go/pipeline"
)
const (
// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
ContainerNameRoot = "$root"
// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
ContainerNameLogs = "$logs"
)
// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
type ServiceURL struct {
client serviceClient
}
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
if p == nil {
panic("p can't be nil")
}
client := newServiceClient(primaryURL, p)
return ServiceURL{client: client}
}
// URL returns the URL endpoint used by the ServiceURL object.
func (s ServiceURL) URL() url.URL {
return s.client.URL()
}
// String returns the URL as a string.
func (s ServiceURL) String() string {
u := s.URL()
return u.String()
}
// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
return NewServiceURL(s.URL(), p)
}
// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
// NewContainerURL method.
func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
containerURL := appendToURLPath(s.URL(), containerName)
return NewContainerURL(containerURL, s.client.Pipeline())
}
// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
func appendToURLPath(u url.URL, name string) url.URL {
// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
// When you call url.Parse() this is what you'll get:
// Scheme: "https"
// Opaque: ""
// User: nil
// Host: "ms.com"
// Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
// RawPath: ""
// ForceQuery: false
// RawQuery: "k1=v1&k2=v2"
// Fragment: "f"
if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
u.Path += "/" // Append "/" to end before appending name
}
u.Path += name
return u
}
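// For example (a hedged sketch), appending a container name to a service URL:
//
//	u, _ := url.Parse("https://account.blob.core.windows.net?k=v")
//	c := appendToURLPath(*u, "mycontainer")
//	// c.String() == "https://account.blob.core.windows.net/mycontainer?k=v"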
// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// After getting a segment, process it, and then call ListContainersSegment again (passing the
// previously-returned Marker) to get the next segment. For more information, see
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) {
prefix, include, maxResults := o.pointers()
return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
}
// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
type ListContainersSegmentOptions struct {
Detail ListContainersDetail // No IncludeType header is produced if ""
Prefix string // No Prefix header is produced if ""
MaxResults int32 // 0 means unspecified
// TODO: update swagger to generate this type?
}
func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
if o.Prefix != "" {
prefix = &o.Prefix
}
if o.MaxResults != 0 {
if o.MaxResults < 0 {
panic("MaxResults must be >= 0")
}
maxResults = &o.MaxResults
}
include = ListContainersIncludeType(o.Detail.string())
return
}
// ListContainersDetail indicates what additional information the service should return with each container.
type ListContainersDetail struct {
// Tells the service whether to return metadata for each container.
Metadata bool
}
// string produces the Include query parameter's value.
func (d *ListContainersDetail) string() string {
items := make([]string, 0, 1)
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
if d.Metadata {
items = append(items, string(ListContainersIncludeMetadata))
}
if len(items) > 0 {
return strings.Join(items, ",")
}
return string(ListContainersIncludeNone)
}
// GetProperties returns the properties of the Blob service.
func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
return bsu.client.GetProperties(ctx, nil, nil)
}
// SetProperties sets the properties of the Blob service.
func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
return bsu.client.SetProperties(ctx, properties, nil, nil)
}
// GetStatistics retrieves statistics related to replication for the Blob service.
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
return bsu.client.GetStatistics(ctx, nil, nil)
}

View File

@ -0,0 +1,3 @@
package azblob
const serviceLibVersion = "0.1"

View File

@ -0,0 +1,55 @@
package azblob
import (
"context"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// Credential represents any credential type; it is used to create a credential policy Factory.
type Credential interface {
pipeline.Factory
credentialMarker()
}
type credentialFunc pipeline.FactoryFunc
func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return f(next, po)
}
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (credentialFunc) credentialMarker() {}
//////////////////////////////
// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
// or for use with Shared Access Signatures (SAS).
func NewAnonymousCredential() Credential {
return anonymousCredentialFactory
}
var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
// anonymousCredentialPolicyFactory is the credential's policy factory.
type anonymousCredentialPolicyFactory struct {
}
// New creates a credential policy object.
func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return &anonymousCredentialPolicy{next: next}
}
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*anonymousCredentialPolicyFactory) credentialMarker() {}
// anonymousCredentialPolicy is the credential's policy object.
type anonymousCredentialPolicy struct {
next pipeline.Policy
}
// Do implements the credential's policy interface.
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
// For anonymous credentials, this is effectively a no-op
return p.next.Do(ctx, request)
}

View File

@ -0,0 +1,187 @@
package azblob
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"net/http"
"net/url"
"sort"
"strings"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) *SharedKeyCredential {
bytes, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
panic(err)
}
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}
}
// SharedKeyCredential contains an account's name and its primary or secondary key.
// It is immutable making it shareable and goroutine-safe.
type SharedKeyCredential struct {
// Only the NewSharedKeyCredential function should set these; all other methods should treat them as read-only
accountName string
accountKey []byte
}
// AccountName returns the Storage account's name.
func (f SharedKeyCredential) AccountName() string {
return f.accountName
}
// New creates a credential policy object.
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
// Add a x-ms-date header if it doesn't already exist
if d := request.Header.Get(headerXmsDate); d == "" {
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
}
stringToSign := f.buildStringToSign(request)
signature := f.ComputeHMACSHA256(stringToSign)
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
request.Header[headerAuthorization] = []string{authHeader}
response, err := next.Do(ctx, request)
if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
// Service failed to authenticate request, log it
po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
}
return response, err
})
}
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*SharedKeyCredential) credentialMarker() {}
// Constants ensuring that header names are correctly spelled and consistently cased.
const (
headerAuthorization = "Authorization"
headerCacheControl = "Cache-Control"
headerContentEncoding = "Content-Encoding"
headerContentDisposition = "Content-Disposition"
headerContentLanguage = "Content-Language"
headerContentLength = "Content-Length"
headerContentMD5 = "Content-MD5"
headerContentType = "Content-Type"
headerDate = "Date"
headerIfMatch = "If-Match"
headerIfModifiedSince = "If-Modified-Since"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerRange = "Range"
headerUserAgent = "User-Agent"
headerXmsDate = "x-ms-date"
headerXmsVersion = "x-ms-version"
)
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
h := hmac.New(sha256.New, f.accountKey)
h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) string {
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
headers := request.Header
contentLength := headers.Get(headerContentLength)
if contentLength == "0" {
contentLength = ""
}
stringToSign := strings.Join([]string{
request.Method,
headers.Get(headerContentEncoding),
headers.Get(headerContentLanguage),
contentLength,
headers.Get(headerContentMD5),
headers.Get(headerContentType),
"", // Empty date because x-ms-date is expected (as per web page above)
headers.Get(headerIfModifiedSince),
headers.Get(headerIfMatch),
headers.Get(headerIfNoneMatch),
headers.Get(headerIfUnmodifiedSince),
headers.Get(headerRange),
buildCanonicalizedHeader(headers),
f.buildCanonicalizedResource(request.URL),
}, "\n")
return stringToSign
}
func buildCanonicalizedHeader(headers http.Header) string {
cm := map[string][]string{}
for k, v := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
}
}
if len(cm) == 0 {
return ""
}
keys := make([]string, 0, len(cm))
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for i, key := range keys {
if i > 0 {
ch.WriteRune('\n')
}
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(strings.Join(cm[key], ","))
}
return string(ch.Bytes())
}
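// As an illustrative sketch of the canonicalization above (assumed input values):
//
//	h := http.Header{}
//	h.Set("x-ms-version", "2018-03-28")
//	h.Set("x-ms-date", "Wed, 25 Jul 2018 00:00:00 GMT")
//	_ = buildCanonicalizedHeader(h)
//	// yields "x-ms-date:Wed, 25 Jul 2018 00:00:00 GMT\nx-ms-version:2018-03-28"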
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
cr := bytes.NewBufferString("/")
cr.WriteString(f.accountName)
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
} else {
// a slash is required to indicate the root path
cr.WriteString("/")
}
// params is a map[string][]string; param name is key; params values is []string
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
if err != nil {
panic(err)
}
if len(params) > 0 { // There is at least 1 query parameter
paramNames := []string{} // We use this to sort the parameter key names
for paramName := range params {
paramNames = append(paramNames, paramName) // paramNames must be lowercase
}
sort.Strings(paramNames)
for _, paramName := range paramNames {
paramValues := params[paramName]
sort.Strings(paramValues)
// Join the sorted key values separated by ','
// Then prepend "keyName:"; then add this string to the buffer
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
}
}
return string(cr.Bytes())
}

View File

@ -0,0 +1,126 @@
package azblob
import (
"context"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// TokenCredential represents a token credential (which is also a pipeline.Factory).
type TokenCredential interface {
Credential
Token() string
SetToken(newToken string)
}
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
// TokenCredential's token value by calling SetToken; your tokenRefresher function must return a time.Duration
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
func NewTokenCredential(initialToken string, tokenRefresher func(credential TokenCredential) time.Duration) TokenCredential {
tc := &tokenCredential{}
tc.SetToken(initialToken) // We don't set it above, to guarantee atomicity
if tokenRefresher == nil {
return tc // If no callback specified, return the simple tokenCredential
}
tcwr := &tokenCredentialWithRefresh{token: tc}
tcwr.token.startRefresh(tokenRefresher)
runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
deadTC.token.stopRefresh()
deadTC.token = nil // Sanity (not really required)
})
return tcwr
}
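// A hedged sketch of a refresher callback (fetchOAuthToken is a hypothetical helper):
//
//	tc := NewTokenCredential("", func(credential TokenCredential) time.Duration {
//		token, expiry := fetchOAuthToken()
//		credential.SetToken(token)
//		return time.Until(expiry) - 2*time.Minute // refresh slightly before expiry
//	})
//	p := NewPipeline(tc, PipelineOptions{})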
// tokenCredentialWithRefresh is a wrapper over a token credential.
// When this wrapper object gets GC'd, it stops the tokenCredential's timer
// which allows the tokenCredential object to also be GC'd.
type tokenCredentialWithRefresh struct {
token *tokenCredential
}
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*tokenCredentialWithRefresh) credentialMarker() {}
// Token returns the current token value
func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
// SetToken changes the current token value
func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return f.token.New(next, po)
}
///////////////////////////////////////////////////////////////////////////////
// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
type tokenCredential struct {
token atomic.Value
// The members below are only used if the user specified a tokenRefresher callback function.
timer *time.Timer
tokenRefresher func(c TokenCredential) time.Duration
lock sync.Mutex
stopped bool
}
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*tokenCredential) credentialMarker() {}
// Token returns the current token value
func (f *tokenCredential) Token() string { return f.token.Load().(string) }
// SetToken changes the current token value
func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
// startRefresh calls refresh which immediately calls tokenRefresher
// and then starts a timer to call tokenRefresher in the future.
func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) time.Duration) {
f.tokenRefresher = tokenRefresher
f.stopped = false // In case the user calls startRefresh, stopRefresh, & then startRefresh again
f.refresh()
}
// refresh calls the user's tokenRefresher so they can refresh the token (by
// calling SetToken) and then starts another timer (based on the returned duration)
// to refresh the token again in the future.
func (f *tokenCredential) refresh() {
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
f.lock.Lock()
if !f.stopped {
f.timer = time.AfterFunc(d, f.refresh)
}
f.lock.Unlock()
}
// stopRefresh stops any pending timer and sets stopped field to true to prevent
// any new timer from starting.
// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
func (f *tokenCredential) stopRefresh() {
f.lock.Lock()
f.stopped = true
if f.timer != nil {
f.timer.Stop()
}
f.lock.Unlock()
}
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
if request.URL.Scheme != "https" {
panic("Token credentials require a URL using the https protocol scheme.")
}
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
return next.Do(ctx, request)
})
}

View File

@ -0,0 +1,27 @@
// +build linux darwin freebsd
package azblob
import (
"os"
"syscall"
)
type mmf []byte
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
if writable {
prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
}
addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
return mmf(addr), err
}
func (m *mmf) unmap() {
err := syscall.Munmap(*m)
*m = nil
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,38 @@
package azblob
import (
"os"
"reflect"
"syscall"
"unsafe"
)
type mmf []byte
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
if writable {
prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
}
hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
if hMMF == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
defer syscall.CloseHandle(hMMF)
addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
m := mmf{}
h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
h.Data = addr
h.Len = length
h.Cap = h.Len
return m, nil
}
func (m *mmf) unmap() {
addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
*m = mmf{}
err := syscall.UnmapViewOfFile(addr)
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,46 @@
package azblob
import (
"github.com/Azure/azure-pipeline-go/pipeline"
)
// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
type PipelineOptions struct {
// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
Log pipeline.LogOptions
// Retry configures the built-in retry policy behavior.
Retry RetryOptions
// RequestLog configures the built-in request logging policy.
RequestLog RequestLogOptions
// Telemetry configures the built-in telemetry policy behavior.
Telemetry TelemetryOptions
}
// NewPipeline creates a Pipeline using the specified credentials and options.
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
if c == nil {
panic("c can't be nil")
}
// Closest to API goes first; closest to the wire goes last
f := []pipeline.Factory{
NewTelemetryPolicyFactory(o.Telemetry),
NewUniqueRequestIDPolicyFactory(),
NewRetryPolicyFactory(o.Retry),
}
if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
// NOTE: The credential's policy factory must appear close to the wire so it can sign any
// changes made by other factories (like UniqueRequestIDPolicyFactory)
f = append(f, c)
}
f = append(f,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
NewRequestLogPolicyFactory(o.RequestLog))
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
}
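// A hedged configuration sketch (caller code): every option is optional and a zero
// value selects the defaults documented on the individual option types.
//
//	p := NewPipeline(NewAnonymousCredential(), PipelineOptions{
//		Retry:     RetryOptions{MaxTries: 3, TryTimeout: time.Minute},
//		Telemetry: TelemetryOptions{Value: "myapp/1.0"},
//	})
//	serviceURL := NewServiceURL(*u, p) // u is an assumed *url.URL for the account endpoint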

View File

@ -0,0 +1,150 @@
package azblob
import (
"bytes"
"context"
"fmt"
"net/http"
"net/url"
"runtime"
"strings"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// RequestLogOptions configures the request logging policy's behavior.
type RequestLogOptions struct {
// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
// duration (-1=no logging; 0=default threshold).
LogWarningIfTryOverThreshold time.Duration
}
func (o RequestLogOptions) defaults() RequestLogOptions {
if o.LogWarningIfTryOverThreshold == 0 {
// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
// But this monitors the time to get the HTTP response; NOT the time to download the response body.
o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
}
return o
}
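// A hedged sketch: pass -1 to suppress the slow-try warning entirely, or any positive
// duration to tighten the threshold.
//
//	factory := NewRequestLogPolicyFactory(RequestLogOptions{LogWarningIfTryOverThreshold: -1})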
// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
o = o.defaults() // Force defaults to be calculated
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
// These variables are per-policy; shared by multiple calls to Do
var try int32
operationStart := time.Now() // If this is the 1st try, record the operation start time
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
try++ // The first try is #1 (not #0)
// Log the outgoing request as informational
if po.ShouldLog(pipeline.LogInfo) {
b := &bytes.Buffer{}
fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
po.Log(pipeline.LogInfo, b.String())
}
// Set the time for this particular retry operation and then Do the operation.
tryStart := time.Now()
response, err = next.Do(ctx, request) // Make the request
tryEnd := time.Now()
tryDuration := tryEnd.Sub(tryStart)
opDuration := tryEnd.Sub(operationStart)
logLevel, forceLog := pipeline.LogInfo, false // Default logging information
// If the response took too long, we'll upgrade to warning.
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
// Log a warning if the try duration exceeded the specified threshold
logLevel, forceLog = pipeline.LogWarning, true
}
if err == nil { // We got a response from the service
sc := response.Response().StatusCode
if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed above) or any 5xx
} else {
// For other status codes, we leave the level as is.
}
} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
logLevel, forceLog = pipeline.LogError, true
}
if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
// We're going to log this; build the string to log
b := &bytes.Buffer{}
slow := ""
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
}
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
if err != nil { // This HTTP request did not get a response from the service
fmt.Fprint(b, "REQUEST ERROR\n")
} else {
if logLevel == pipeline.LogError {
fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
} else {
fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
}
}
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
if logLevel <= pipeline.LogError {
b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
}
msg := b.String()
if forceLog {
pipeline.ForceLog(logLevel, msg)
}
if shouldLog {
po.Log(logLevel, msg)
}
}
return response, err
}
})
}
func redactSigQueryParam(rawQuery string) (bool, string) {
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
sigFound := strings.Contains(rawQuery, "?sig=")
if !sigFound {
sigFound = strings.Contains(rawQuery, "&sig=")
if !sigFound {
return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
}
}
// [?|&]sig= found, redact its value
values, _ := url.ParseQuery(rawQuery)
for name := range values {
if strings.EqualFold(name, "sig") {
values[name] = []string{"REDACTED"}
}
}
return sigFound, values.Encode()
}
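// For example (illustrative, with a fake signature value):
//
//	found, q := redactSigQueryParam("se=2018-08-01&sig=abc123")
//	// found == true, q == "se=2018-08-01&sig=REDACTED"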
func prepareRequestForLogging(request pipeline.Request) *http.Request {
req := request
if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
// Make copy so we don't destroy the query parameters we actually need to send in the request
req = request.Copy()
req.Request.URL.RawQuery = rawQuery
}
return req.Request
}
func stack() []byte {
buf := make([]byte, 1024)
for {
n := runtime.Stack(buf, false)
if n < len(buf) {
return buf[:n]
}
buf = make([]byte, 2*len(buf))
}
}

View File

@ -0,0 +1,318 @@
package azblob
import (
"context"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"strconv"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
type RetryPolicy int32
const (
// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
RetryPolicyExponential RetryPolicy = 0
// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
RetryPolicyFixed RetryPolicy = 1
)
// RetryOptions configures the retry policy's behavior.
type RetryOptions struct {
// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
// A value of zero means that you accept our default policy.
Policy RetryPolicy
// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
MaxTries int32
// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
// of data, the default TryTimeout will probably not be sufficient. You should override this value
// based on the bandwidth available to the host machine and proximity to the Storage service. A good
// starting point may be something like (60 seconds per MB of anticipated-payload-size).
TryTimeout time.Duration
// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
// with each retry up to a maximum specified by MaxRetryDelay.
// If you specify 0, then you must also specify 0 for MaxRetryDelay.
// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
// equal to or greater than RetryDelay.
RetryDelay time.Duration
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
// If you specify 0, then you must also specify 0 for RetryDelay.
MaxRetryDelay time.Duration
// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
}
func (o RetryOptions) retryReadsFromSecondaryHost() string {
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
//return "" // This is for non-blob SDKs
}
func (o RetryOptions) defaults() RetryOptions {
if o.Policy != RetryPolicyExponential && o.Policy != RetryPolicyFixed {
panic("RetryPolicy must be RetryPolicyExponential or RetryPolicyFixed")
}
if o.MaxTries < 0 {
panic("MaxTries must be >= 0")
}
if o.TryTimeout < 0 || o.RetryDelay < 0 || o.MaxRetryDelay < 0 {
panic("TryTimeout, RetryDelay, and MaxRetryDelay must all be >= 0")
}
if o.RetryDelay > o.MaxRetryDelay {
panic("RetryDelay must be <= MaxRetryDelay")
}
if (o.RetryDelay == 0 && o.MaxRetryDelay != 0) || (o.RetryDelay != 0 && o.MaxRetryDelay == 0) {
panic("Both RetryDelay and MaxRetryDelay must be 0 or neither can be 0")
}
IfDefault := func(current *time.Duration, desired time.Duration) {
if *current == time.Duration(0) {
*current = desired
}
}
// Set defaults if unspecified
if o.MaxTries == 0 {
o.MaxTries = 4
}
switch o.Policy {
case RetryPolicyExponential:
IfDefault(&o.TryTimeout, 1*time.Minute)
IfDefault(&o.RetryDelay, 4*time.Second)
IfDefault(&o.MaxRetryDelay, 120*time.Second)
case RetryPolicyFixed:
IfDefault(&o.TryTimeout, 1*time.Minute)
IfDefault(&o.RetryDelay, 30*time.Second)
IfDefault(&o.MaxRetryDelay, 120*time.Second)
}
return o
}
func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
pow := func(number int64, exponent int32) int64 { // pow is nested helper function
var result int64 = 1
for n := int32(0); n < exponent; n++ {
result *= number
}
return result
}
delay := time.Duration(0)
switch o.Policy {
case RetryPolicyExponential:
delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
case RetryPolicyFixed:
if try > 1 { // Any try after the 1st uses the fixed delay
delay = o.RetryDelay
}
}
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
if delay > o.MaxRetryDelay {
delay = o.MaxRetryDelay
}
return delay
}
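// With the exponential defaults above (RetryDelay=4s, MaxRetryDelay=120s), the
// pre-jitter delays work out to (an illustrative calculation):
//
//	try 1: (2^0 - 1) * 4s = 0s
//	try 2: (2^1 - 1) * 4s = 4s
//	try 3: (2^2 - 1) * 4s = 12s
//	try 4: (2^3 - 1) * 4s = 28s
//
// each then scaled by the [0.8, 1.3) jitter factor and capped at MaxRetryDelay.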
// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
o = o.defaults() // Force defaults to be calculated
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
// Before each try, we'll select either the primary or secondary URL.
primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3)
// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
// If using a secondary:
// Odd tries go against the primary; even tries go against the secondary
// For a primary, wait ((2 ^ (primaryTries - 1)) - 1) * delay * random(0.8, 1.3)
// If the secondary gets a 404, don't fail; retry, but future retries are only against the primary
// When retrying against a secondary, ignore the retry count and wait (1 second * random(0.8, 1.3))
for try := int32(1); try <= o.MaxTries; try++ {
logf("\n=====> Try=%d\n", try)
// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd-numbered attempt.
tryingPrimary := !considerSecondary || (try%2 == 1)
// Select the correct host and delay
if tryingPrimary {
primaryTry++
delay := o.calcDelay(primaryTry)
logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
time.Sleep(delay) // The 1st try returns 0 delay
} else {
delay := time.Second * time.Duration(rand.Float32()/2+0.8)
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
time.Sleep(delay) // Delay with some jitter before trying secondary
}
// Clone the original request to ensure that each try starts with the original (unmutated) request.
requestCopy := request.Copy()
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
// 1st try as for additional tries.
if err = requestCopy.RewindBody(); err != nil {
panic(err)
}
if !tryingPrimary {
requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
}
// Set the server-side timeout query parameter "timeout=[seconds]"
timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
if t < timeout {
timeout = t
}
if timeout < 0 {
timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
}
logf("TryTimeout adjusted to=%d sec\n", timeout)
}
q := requestCopy.Request.URL.Query()
q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
requestCopy.Request.URL.RawQuery = q.Encode()
logf("Url=%s\n", requestCopy.Request.URL.String())
// Set the time for this particular retry operation and then Do the operation.
tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
response, err = next.Do(tryCtx, requestCopy) // Make the request
/*err = improveDeadlineExceeded(err)
if err == nil {
response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
}*/
logf("Err=%v, response=%v\n", err, response)
action := "" // This MUST get changed within the switch code below
switch {
case ctx.Err() != nil:
action = "NoRetry: Op timeout"
case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound:
// If attempt was against the secondary & it returned a StatusNotFound (404), then
// the resource was not found. This may be due to replication delay. So, in this
// case, we'll never try the secondary again for this operation.
considerSecondary = false
action = "Retry: Secondary URL returned 404"
case err != nil:
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
action = "Retry: net.Error and Temporary() or Timeout()"
} else {
action = "NoRetry: unrecognized error"
}
default:
action = "NoRetry: successful HTTP request" // no error
}
logf("Action=%s\n", action)
// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
if action[0] != 'R' { // Retry only if action starts with 'R'
if err != nil {
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
} else {
// TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled.
// Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
// So, when the user closes the Body, our per-try context gets closed too.
// Another option is that the last policy does this wrapping for a per-retry context (not for the user's context)
_ = tryCancel // So, for now, we don't call cancel: cancel()
}
break // Don't retry
}
if response != nil && response.Response() != nil && response.Response().Body != nil {
// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
body := response.Response().Body
io.Copy(ioutil.Discard, body)
body.Close()
}
// If retrying, cancel the current per-try timeout context
tryCancel()
}
return response, err // Not retryable or too many retries; return the last response/error
}
})
}
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
var logf = func(format string, a ...interface{}) {}
// Use this version to see the retry method's code path (import "fmt")
//var logf = fmt.Printf
/*
type deadlineExceededReadCloser struct {
r io.ReadCloser
}
func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
n, err := 0, io.EOF
if r.r != nil {
n, err = r.r.Read(p)
}
return n, improveDeadlineExceeded(err)
}
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
// For an HTTP request, the ReadCloser MUST also implement seek
// For an HTTP response, Seek MUST not be called (or this will panic)
o, err := r.r.(io.Seeker).Seek(offset, whence)
return o, improveDeadlineExceeded(err)
}
func (r *deadlineExceededReadCloser) Close() error {
if c, ok := r.r.(io.Closer); ok {
c.Close()
}
return nil
}
// timeoutError is the internal struct that implements our richer timeout error.
type deadlineExceeded struct {
responseError
}
var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
func improveDeadlineExceeded(cause error) error {
// If cause is not DeadlineExceeded, return the same error passed in.
if cause != context.DeadlineExceeded {
return cause
}
// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
return &deadlineExceeded{
responseError: responseError{
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
},
}
}
// Error implements the error interface's Error method to return a string representation of the error.
func (e *deadlineExceeded) Error() string {
return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
}
*/

View File

@ -0,0 +1,51 @@
package azblob
import (
"bytes"
"context"
"fmt"
"os"
"runtime"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct {
// Value is a string prepended to each request's User-Agent and sent to the service.
// The service records the user-agent in logs for diagnostics and tracking of client requests.
Value string
}
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
// which add telemetry information to outgoing HTTP requests.
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
b := &bytes.Buffer{}
b.WriteString(o.Value)
if b.Len() > 0 {
b.WriteRune(' ')
}
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
telemetryValue := b.String()
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
request.Header.Set("User-Agent", telemetryValue)
return next.Do(ctx, request)
}
})
}
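// Usage sketch (illustrative, not part of this file): the telemetry policy is
// normally wired in through PipelineOptions rather than constructed directly;
// the application name below is a placeholder and the Telemetry field name is
// assumed from this SDK snapshot.
//
//	po := PipelineOptions{
//		Telemetry: TelemetryOptions{Value: "myapp/1.0"},
//	}
//	p := NewPipeline(NewAnonymousCredential(), po)
//	// Requests sent through p carry a User-Agent like:
//	// "myapp/1.0 Azure-Storage/<lib version> (<go version>; <os>)"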
// NOTE: the ONLY function that should write to this variable is this func
var platformInfo = func() string {
// Azure-Storage/version (runtime; os type and version)
// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
operatingSystem := runtime.GOOS // Default OS string
switch operatingSystem {
case "windows":
operatingSystem = os.Getenv("OS") // Get more specific OS information
case "linux": // accept default OS info
case "freebsd": // accept default OS info
}
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
}()

@@ -0,0 +1,24 @@
package azblob
import (
"context"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
// This is Policy's Do method:
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
id := request.Header.Get(xMsClientRequestID)
if id == "" { // Add a unique request ID if the caller didn't specify one already
request.Header.Set(xMsClientRequestID, newUUID().String())
}
return next.Do(ctx, request)
}
})
}
const xMsClientRequestID = "x-ms-client-request-id"
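// Composition sketch (illustrative): factories such as this one are normally
// assembled for you by NewPipeline, but they can also be composed manually via
// the pipeline package; the factory list and ordering below are an assumption,
// not the SDK's canonical pipeline.
//
//	factories := []pipeline.Factory{
//		NewTelemetryPolicyFactory(TelemetryOptions{Value: "myapp/1.0"}),
//		NewUniqueRequestIDPolicyFactory(),
//	}
//	p := pipeline.NewPipeline(factories, pipeline.Options{})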

@@ -0,0 +1,122 @@
package azblob
import (
"context"
"io"
"net"
"net/http"
)
const CountToEnd = 0
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
// Offset specifies the start offset that should be used when
// creating the HTTP GET request's Range header
Offset int64
// Count specifies the count of bytes that should be used to calculate
// the end offset when creating the HTTP GET request's Range header
Count int64
// ETag specifies the resource's etag that should be used when creating
// the HTTP GET request's If-Match header
ETag ETag
}
// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct {
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
// while reading from a RetryReader. A value of zero means that no additional HTTP
// GET requests will be made.
MaxRetryRequests int
doInjectError bool
doInjectErrorRound int
}
// retryReader implements the io.ReadCloser interface.
// retryReader reads from a response body; if a retriable network error is
// returned while reading, it retries (per the RetryReaderOptions) by invoking
// the user-defined getter with updated offset/count information to obtain a
// new response, and then continues the overall read from that new response.
type retryReader struct {
ctx context.Context
response *http.Response
info HTTPGetterInfo
countWasBounded bool
o RetryReaderOptions
getter HTTPGetter
}
// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
if getter == nil {
panic("getter must not be nil")
}
if info.Count < 0 {
panic("info.Count must be >= 0")
}
if o.MaxRetryRequests < 0 {
panic("o.MaxRetryRequests must be >= 0")
}
return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
}
func (s *retryReader) Read(p []byte) (n int, err error) {
for try := 0; ; try++ {
		//fmt.Println(try) // Uncomment for debugging.
if s.countWasBounded && s.info.Count == CountToEnd {
// User specified an original count and the remaining bytes are 0, return 0, EOF
return 0, io.EOF
}
if s.response == nil { // We don't have a response stream to read from, try to get one.
response, err := s.getter(s.ctx, s.info)
if err != nil {
return 0, err
}
// Successful GET; this is the network stream we'll read from.
s.response = response
}
n, err := s.response.Body.Read(p) // Read from the stream
// Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound {
err = &net.DNSError{IsTemporary: true}
}
		// We successfully read data or reached EOF.
if err == nil || err == io.EOF {
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Count != CountToEnd {
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
}
			return n, err // Return the result to the caller
}
s.Close() // Error, close stream
s.response = nil // Our stream is no longer good
// Check the retry count and error code, and decide whether to retry.
if try >= s.o.MaxRetryRequests {
return n, err // All retries exhausted
}
		if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) {
			continue // Loop around and try to get and read from a new stream.
		}
return n, err // Not retryable, just return
}
}
func (s *retryReader) Close() error {
if s.response != nil && s.response.Body != nil {
return s.response.Body.Close()
}
return nil
}
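// Usage sketch (illustrative): NewRetryReader needs a getter that can re-issue
// a ranged GET from the updated offset. A minimal getter over net/http might
// look like the following; the URL and initialResp are placeholders, and the
// If-Match header keeps a changed resource from being mixed into the stream.
//
//	getter := func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) {
//		req, err := http.NewRequest("GET", "https://example.invalid/blob", nil)
//		if err != nil {
//			return nil, err
//		}
//		end := "" // an empty end offset means "to the end of the resource"
//		if i.Count != CountToEnd {
//			end = strconv.FormatInt(i.Offset+i.Count-1, 10)
//		}
//		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%s", i.Offset, end))
//		req.Header.Set("If-Match", string(i.ETag))
//		return http.DefaultClient.Do(req.WithContext(ctx))
//	}
//	r := NewRetryReader(ctx, initialResp, HTTPGetterInfo{Count: CountToEnd},
//		RetryReaderOptions{MaxRetryRequests: 3}, getter)
//	defer r.Close()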

@@ -0,0 +1,217 @@
package azblob
import (
"bytes"
"fmt"
"strings"
"time"
)
// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
type AccountSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
IPRange IPRange `param:"sip"`
Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
}
// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
// the proper SAS query parameters.
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
panic("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
}
if v.Version == "" {
v.Version = SASVersion
}
perms := &AccountSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
}
v.Permissions = perms.String()
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
stringToSign := strings.Join([]string{
sharedKeyCredential.AccountName(),
v.Permissions,
v.Services,
v.ResourceTypes,
startTime,
expiryTime,
v.IPRange.String(),
string(v.Protocol),
v.Version,
""}, // That right, the account SAS requires a terminating extra newline
"\n")
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
p := SASQueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Account-specific SAS parameters
services: v.Services,
resourceTypes: v.ResourceTypes,
// Calculated SAS signature
signature: signature,
}
return p
}
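// Usage sketch (illustrative; the account name and key below are placeholders):
//
//	cred := NewSharedKeyCredential("myaccount", "bXlhY2NvdW50a2V5")
//	qp := AccountSASSignatureValues{
//		Protocol:      SASProtocolHTTPS,
//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
//		Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
//		Services:      AccountSASServices{Blob: true}.String(),
//		ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
//	}.NewSASQueryParameters(cred)
//	sasURL := "https://myaccount.blob.core.windows.net/?" + qp.Encode()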
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
Read, Write, Delete, List, Add, Create, Update, Process bool
}
// String produces the SAS permissions string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Permissions field.
func (p AccountSASPermissions) String() string {
var buffer bytes.Buffer
if p.Read {
buffer.WriteRune('r')
}
if p.Write {
buffer.WriteRune('w')
}
if p.Delete {
buffer.WriteRune('d')
}
if p.List {
buffer.WriteRune('l')
}
if p.Add {
buffer.WriteRune('a')
}
if p.Create {
buffer.WriteRune('c')
}
if p.Update {
buffer.WriteRune('u')
}
if p.Process {
buffer.WriteRune('p')
}
return buffer.String()
}
// Parse initializes the AccountSASPermissions's fields from a string.
func (p *AccountSASPermissions) Parse(s string) error {
*p = AccountSASPermissions{} // Clear out the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'l':
p.List = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'u':
p.Update = true
case 'p':
p.Process = true
default:
return fmt.Errorf("Invalid permission character: '%v'", r)
}
}
return nil
}
// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
type AccountSASServices struct {
Blob, Queue, File bool
}
// String produces the SAS services string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Services field.
func (s AccountSASServices) String() string {
var buffer bytes.Buffer
if s.Blob {
buffer.WriteRune('b')
}
if s.Queue {
buffer.WriteRune('q')
}
if s.File {
buffer.WriteRune('f')
}
return buffer.String()
}
// Parse initializes the AccountSASServices' fields from a string.
func (a *AccountSASServices) Parse(s string) error {
*a = AccountSASServices{} // Clear out the flags
for _, r := range s {
switch r {
case 'b':
a.Blob = true
case 'q':
a.Queue = true
case 'f':
a.File = true
default:
return fmt.Errorf("Invalid service character: '%v'", r)
}
}
return nil
}
// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountSASResourceTypes struct {
Service, Container, Object bool
}
// String produces the SAS resource types string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
func (rt AccountSASResourceTypes) String() string {
var buffer bytes.Buffer
if rt.Service {
buffer.WriteRune('s')
}
if rt.Container {
buffer.WriteRune('c')
}
if rt.Object {
buffer.WriteRune('o')
}
return buffer.String()
}
// Parse initializes the AccountSASResourceTypes's fields from a string.
func (rt *AccountSASResourceTypes) Parse(s string) error {
*rt = AccountSASResourceTypes{} // Clear out the flags
for _, r := range s {
switch r {
case 's':
rt.Service = true
		case 'c':
			rt.Container = true
case 'o':
rt.Object = true
default:
return fmt.Errorf("Invalid resource type: '%v'", r)
}
}
return nil
}

@@ -0,0 +1,211 @@
package azblob
import (
"net"
"net/url"
"strings"
"time"
)
// SASVersion indicates the SAS version.
const SASVersion = ServiceVersion
type SASProtocol string
const (
// SASProtocolHTTPS can be specified for a SAS protocol
SASProtocolHTTPS SASProtocol = "https"
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
SASProtocolHTTPSandHTTP SASProtocol = "https,http"
)
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
ss := ""
if !startTime.IsZero() {
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
}
se := ""
if !expiryTime.IsZero() {
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
}
return ss, se
}
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
// to a query parameter map by calling addToValues().
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
//
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
// All members are immutable or values so copies of this struct are goroutine-safe.
version string `param:"sv"`
services string `param:"ss"`
resourceTypes string `param:"srt"`
protocol SASProtocol `param:"spr"`
startTime time.Time `param:"st"`
expiryTime time.Time `param:"se"`
ipRange IPRange `param:"sip"`
identifier string `param:"si"`
resource string `param:"sr"`
permissions string `param:"sp"`
signature string `param:"sig"`
}
func (p *SASQueryParameters) Version() string {
return p.version
}
func (p *SASQueryParameters) Services() string {
return p.services
}
func (p *SASQueryParameters) ResourceTypes() string {
return p.resourceTypes
}
func (p *SASQueryParameters) Protocol() SASProtocol {
return p.protocol
}
func (p *SASQueryParameters) StartTime() time.Time {
return p.startTime
}
func (p *SASQueryParameters) ExpiryTime() time.Time {
return p.expiryTime
}
func (p *SASQueryParameters) IPRange() IPRange {
return p.ipRange
}
func (p *SASQueryParameters) Identifier() string {
return p.identifier
}
func (p *SASQueryParameters) Resource() string {
return p.resource
}
func (p *SASQueryParameters) Permissions() string {
return p.permissions
}
func (p *SASQueryParameters) Signature() string {
return p.signature
}
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
Start net.IP // Not specified if length = 0
End net.IP // Not specified if length = 0
}
// String returns a string representation of an IPRange.
func (ipr *IPRange) String() string {
if len(ipr.Start) == 0 {
return ""
}
start := ipr.Start.String()
if len(ipr.End) == 0 {
return start
}
return start + "-" + ipr.End.String()
}
// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
p := SASQueryParameters{}
for k, v := range values {
val := v[0]
isSASKey := true
switch strings.ToLower(k) {
case "sv":
p.version = val
case "ss":
p.services = val
case "srt":
p.resourceTypes = val
case "spr":
p.protocol = SASProtocol(val)
case "st":
p.startTime, _ = time.Parse(SASTimeFormat, val)
case "se":
p.expiryTime, _ = time.Parse(SASTimeFormat, val)
case "sip":
dashIndex := strings.Index(val, "-")
if dashIndex == -1 {
p.ipRange.Start = net.ParseIP(val)
} else {
p.ipRange.Start = net.ParseIP(val[:dashIndex])
p.ipRange.End = net.ParseIP(val[dashIndex+1:])
}
case "si":
p.identifier = val
case "sr":
p.resource = val
case "sp":
p.permissions = val
case "sig":
p.signature = val
default:
isSASKey = false // We didn't recognize the query parameter
}
if isSASKey && deleteSASParametersFromValues {
delete(values, k)
}
}
return p
}
// addToValues adds the SAS components to the specified query parameters map.
func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
if p.version != "" {
v.Add("sv", p.version)
}
if p.services != "" {
v.Add("ss", p.services)
}
if p.resourceTypes != "" {
v.Add("srt", p.resourceTypes)
}
if p.protocol != "" {
v.Add("spr", string(p.protocol))
}
if !p.startTime.IsZero() {
v.Add("st", p.startTime.Format(SASTimeFormat))
}
if !p.expiryTime.IsZero() {
v.Add("se", p.expiryTime.Format(SASTimeFormat))
}
if len(p.ipRange.Start) > 0 {
v.Add("sip", p.ipRange.String())
}
if p.identifier != "" {
v.Add("si", p.identifier)
}
if p.resource != "" {
v.Add("sr", p.resource)
}
if p.permissions != "" {
v.Add("sp", p.permissions)
}
if p.signature != "" {
v.Add("sig", p.signature)
}
return v
}
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
func (p *SASQueryParameters) Encode() string {
v := url.Values{}
p.addToValues(v)
return v.Encode()
}
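// Round-trip sketch (illustrative; the URL and signature are placeholders):
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/c?sv=2018-03-28&sp=rl&sig=...")
//	vals := u.Query()
//	sas := newSASQueryParameters(vals, true) // strips sv, sp, sig, ... from vals
//	rawSAS := sas.Encode()                   // re-serializes only the SAS components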

@@ -0,0 +1,131 @@
package azblob
// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
const (
// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
ServiceCodeNone ServiceCodeType = ""
// ServiceCodeAccountAlreadyExists means the specified account already exists.
ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists"
// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated"
// ServiceCodeAccountIsDisabled means the specified account is disabled (403).
ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled"
// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed"
// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403).
ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
ServiceCodeInternalError ServiceCodeType = "InternalError"
// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
// ServiceCodeResourceNotFound means the specified resource does not exist (404).
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
// ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503).
ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
)

@@ -0,0 +1,110 @@
package azblob
import (
"bytes"
"encoding/xml"
"fmt"
"net/http"
"sort"
"github.com/Azure/azure-pipeline-go/pipeline"
)
func init() {
// wire up our custom error handling constructor
responseErrorFactory = newStorageError
}
// ServiceCodeType is a string identifying a storage service error.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
type ServiceCodeType string
// StorageError identifies a responder-generated network or response parsing error.
type StorageError interface {
// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
ResponseError
// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
ServiceCode() ServiceCodeType
}
// storageError is the internal struct that implements the public StorageError interface.
type storageError struct {
responseError
serviceCode ServiceCodeType
details map[string]string
}
// newStorageError creates an error object that implements the error interface.
func newStorageError(cause error, response *http.Response, description string) error {
return &storageError{
responseError: responseError{
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
response: response,
description: description,
},
}
}
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode }
// Error implements the error interface's Error method to return a string representation of the error.
func (e *storageError) Error() string {
b := &bytes.Buffer{}
fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
fmt.Fprintf(b, "Description=%s, Details: ", e.description)
if len(e.details) == 0 {
b.WriteString("(none)\n")
} else {
b.WriteRune('\n')
keys := make([]string, 0, len(e.details))
// Alphabetize the details
for k := range e.details {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprintf(b, " %s: %+v\n", k, e.details[k])
}
}
req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
return e.ErrorNode.Error(b.String())
}
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
func (e *storageError) Temporary() bool {
if e.response != nil {
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
return true
}
}
return e.ErrorNode.Temporary()
}
// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
tokName := ""
var t xml.Token
for t, err = d.Token(); err == nil; t, err = d.Token() {
switch tt := t.(type) {
case xml.StartElement:
tokName = tt.Name.Local
break
case xml.CharData:
switch tokName {
case "Code":
e.serviceCode = ServiceCodeType(tt)
case "Message":
e.description = string(tt)
default:
if e.details == nil {
e.details = map[string]string{}
}
e.details[tokName] = string(tt)
}
}
}
return nil
}
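// Recovery sketch (illustrative): callers can type-assert errors returned by
// this package's operations to StorageError and branch on the service code.
//
//	if stgErr, ok := err.(StorageError); ok {
//		switch stgErr.ServiceCode() {
//		case ServiceCodeResourceNotFound:
//			// e.g. create the missing resource, or give up
//		case ServiceCodeServerBusy:
//			// e.g. back off and retry later
//		}
//	}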

@@ -0,0 +1,61 @@
package azblob
import (
"errors"
"fmt"
"io"
"strconv"
)
// httpRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
// which has an offset but a zero-value count indicates from the offset to the resource's end.
type httpRange struct {
offset int64
count int64
}
func (r httpRange) pointers() *string {
if r.offset == 0 && r.count == 0 { // Do common case first for performance
return nil // No specified range
}
if r.offset < 0 {
panic("The range offset must be >= 0")
}
if r.count < 0 {
panic("The range count must be >= 0")
}
endOffset := "" // if count == 0
if r.count > 0 {
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
}
dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
return &dataRange
}
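// For example, per the formatting above:
//
//	httpRange{}.pointers()                      // nil: the entire resource
//	httpRange{offset: 100}.pointers()           // "bytes=100-": offset 100 to the end
//	httpRange{offset: 0, count: 100}.pointers() // "bytes=0-99": the first 100 bytes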
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 {
	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
return 0
}
validateSeekableStreamAt0(body)
count, err := body.Seek(0, io.SeekEnd)
if err != nil {
panic("failed to seek stream")
}
body.Seek(0, io.SeekStart)
return count
}
func validateSeekableStreamAt0(body io.ReadSeeker) {
	if body == nil { // nil bodies are "logically" seekable to 0
return
}
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
if err != nil {
panic(err)
}
panic(errors.New("stream must be set to position 0"))
}
}
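// Sizing sketch (illustrative): upload paths use the helper above to measure a
// request body without consuming it; the stream must already be positioned at 0.
//
//	body := bytes.NewReader(data) // an io.ReadSeeker positioned at 0
//	n := validateSeekableStreamAt0AndGetCount(body)
//	// n == int64(len(data)), and body has been rewound to position 0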

@@ -0,0 +1,80 @@
package azblob
import (
"crypto/rand"
"fmt"
"strconv"
)
// The UUID reserved variants.
const (
reservedNCS byte = 0x80
reservedRFC4122 byte = 0x40
reservedMicrosoft byte = 0x20
reservedFuture byte = 0x00
)
// A UUID representation compliant with specification in RFC 4122 document.
type uuid [16]byte
// newUUID returns a new uuid using the RFC 4122 algorithm.
func newUUID() (u uuid) {
u = uuid{}
// Set all bits to randomly (or pseudo-randomly) chosen values.
_, err := rand.Read(u[:])
if err != nil {
panic("ran.Read failed")
}
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
var version byte = 4
u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
return
}
// String returns an unparsed version of the generated UUID sequence.
func (u uuid) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
func parseUUID(uuidStr string) uuid {
char := func(hexString string) byte {
i, _ := strconv.ParseUint(hexString, 16, 8)
return byte(i)
}
if uuidStr[0] == '{' {
uuidStr = uuidStr[1:] // Skip over the '{'
}
// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
uuidVal := uuid{
char(uuidStr[0:2]),
char(uuidStr[2:4]),
char(uuidStr[4:6]),
char(uuidStr[6:8]),
char(uuidStr[9:11]),
char(uuidStr[11:13]),
char(uuidStr[14:16]),
char(uuidStr[16:18]),
char(uuidStr[19:21]),
char(uuidStr[21:23]),
char(uuidStr[24:26]),
char(uuidStr[26:28]),
char(uuidStr[28:30]),
char(uuidStr[30:32]),
char(uuidStr[32:34]),
char(uuidStr[34:36]),
}
return uuidVal
}
func (u uuid) bytes() []byte {
return u[:]
}
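// Round-trip sketch (illustrative; the UUID shown is just an example value):
//
//	u := newUUID()
//	s := u.String() // e.g. "f47ac10b-58cc-4372-a567-0e02b2c3d479"
//	u2 := parseUUID(s)
//	_ = u2 == u     // true: String and parseUUID round-trip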

@@ -0,0 +1,89 @@
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package azblob allows you to manipulate Azure Storage containers and blobs objects.
URL Types
The most common types you'll work with are the XxxURL types. The methods of these types make requests
against the Azure Storage Service.
- ServiceURL's methods perform operations on a storage account.
- ContainerURL's methods perform operations on an account's container.
- BlockBlobURL's methods perform operations on a container's block blob.
- AppendBlobURL's methods perform operations on a container's append blob.
- PageBlobURL's methods perform operations on a container's page blob.
- BlobURL's methods perform operations on a container's blob regardless of the blob's type.
Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed.
The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
URL but it shares the same pipeline as the parent ServiceURL object.
To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
respectively. These three types are all identical except for the methods they expose; each type exposes the methods
relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
with the same URL as the original but with the specified pipeline.
Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
XxxURL objects share a lot of system resources making them very efficient.
All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
example of how to deal with errors.
URL and Shared Access Signature Manipulation
The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
for generating and parsing Shared Access Signatures (SAS):
- Use the AccountSASSignatureValues type to create a SAS for a storage account.
- Use the BlobSASSignatureValues type to create a SAS for a container or blob.
- Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
To generate a SAS, you must use the SharedKeyCredential type.
Credentials
When creating a request pipeline, you must specify one of this package's credential types.
- Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
- Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
to generate Shared Access Signatures.
HTTP Request Policy Factories
This package defines several request policy factories for use with the pipeline package.
Most applications will not use these factories directly; instead, the NewPipeline
function creates these factories, initializes them (via the PipelineOptions type)
and returns a pipeline object for use by the XxxURL objects.
However, for advanced scenarios, developers can access these policy factories directly
and even create their own and then construct their own pipeline in order to affect HTTP
requests and responses performed by the XxxURL objects. For example, developers can
introduce their own logging, random failures, request recording & playback for fast
testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
possibilities are endless!
Below are the request pipeline policy factory functions that are provided with this
package:
- NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
- NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
- NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
- NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
*/
package azblob
// TokenCredential: use this to access resources using Role-Based Access Control (RBAC).
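// A minimal end-to-end sketch (illustrative; the account, key, and names are
// placeholders, and error handling is elided):
//
//	credential := NewSharedKeyCredential("myaccount", "bXlhY2NvdW50a2V5")
//	p := NewPipeline(credential, PipelineOptions{})
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
//	service := NewServiceURL(*u, p)
//	container := service.NewContainerURL("mycontainer")
//	blob := container.NewBlockBlobURL("readme.txt")
//	ctx := context.Background()
//	_, _ = blob.Upload(ctx, strings.NewReader("hello"),
//		BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})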

@@ -0,0 +1,234 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/base64"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
type appendBlobClient struct {
managementClient
}
// newAppendBlobClient creates an instance of the appendBlobClient client.
func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
return appendBlobClient{newManagementClient(url, p)}
}
// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
// supported only on version 2015-02-21 or later.
//
// body is initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. timeout is the timeout parameter expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
// only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.appendBlockPreparer(body, contentLength, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req)
if err != nil {
return nil, err
}
return resp.(*AppendBlobAppendBlockResponse), err
}
// appendBlockPreparer prepares the AppendBlock request.
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "appendblock")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if maxSize != nil {
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
}
if appendPosition != nil {
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// appendBlockResponder handles the response to the AppendBlock request.
func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
}
// Create the Create Append Blob operation creates a new append blob.
//
// contentLength is the length of the request. timeout is the timeout parameter expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
if err != nil {
return nil, err
}
return resp.(*AppendBlobCreateResponse), err
}
// createPreparer prepares the Create request.
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-blob-type", "AppendBlob")
return req, nil
}
// createResponder handles the response to the Create request.
func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
}
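// In-package sketch (illustrative): driving the generated client directly with
// an append-position precondition; the URL, pipeline p, and ctx are
// placeholders, and real callers go through the exported wrapper types instead.
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/c/log.txt")
//	client := newAppendBlobClient(*u, p)
//	data := []byte("entry\n")
//	pos := int64(0) // fails with 412 AppendPositionConditionNotMet unless the blob is still empty
//	_, err := client.AppendBlock(ctx, bytes.NewReader(data), int64(len(data)),
//		nil, nil, nil, &pos, nil, nil, nil, nil, nil)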

File diff suppressed because it is too large

@@ -0,0 +1,498 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"bytes"
"context"
"encoding/base64"
"encoding/xml"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
// blockBlobClient is the client for the BlockBlob methods of the Azblob service.
type blockBlobClient struct {
managementClient
}
// newBlockBlobClient creates an instance of the blockBlobClient client.
func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
return blockBlobClient{newManagementClient(url, p)}
}
// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the
// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior
// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
// block, whichever list it may belong to.
//
// timeout is the timeout parameter expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
// content type. If specified, this property is stored with the blob and returned with a read request.
// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockBlobCommitBlockListResponse), err
}
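// ID sketch (illustrative): block IDs must be Base64-encoded and the same
// length within a blob, so a common pattern derives them from a fixed-width
// counter. The BlockLookupList field name below follows the generated models
// and is an assumption here; client and ctx are placeholders.
//
//	ids := make([]string, 0, 3)
//	for i := 0; i < 3; i++ {
//		ids = append(ids, base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i))))
//	}
//	blocks := BlockLookupList{Latest: ids} // commit the most recently staged version of each block
//	_, err := client.CommitBlockList(ctx, blocks, nil, nil, nil, nil, nil,
//		nil, nil, nil, nil, nil, nil, nil, nil, nil)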
// commitBlockListPreparer prepares the CommitBlockList request.
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "blocklist")
req.URL.RawQuery = params.Encode()
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
b, err := xml.Marshal(blocks)
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
}
req.Header.Set("Content-Type", "application/xml")
err = req.SetBody(bytes.NewReader(b))
if err != nil {
return req, pipeline.NewError(err, "failed to set request body")
}
return req, nil
}
// commitBlockListResponder handles the response to the CommitBlockList request.
func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err
}
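// Illustrative sketch (ours, not part of the AutoRest output): driving
// CommitBlockList with a minimal argument set. The helper name is hypothetical,
// and BlockLookupList is assumed to expose Committed/Uncommitted/Latest ID
// lists as in this package's generated models; every optional parameter is nil.
func exampleCommitBlockList(ctx context.Context, client blockBlobClient, ids []string) error {
	// Latest resolves each ID against the uncommitted set first, then the
	// committed set, which is the common choice after staging fresh blocks.
	blocks := BlockLookupList{Latest: ids}
	_, err := client.CommitBlockList(ctx, blocks, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}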
// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
// blob
//
// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists
// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob
// snapshot to retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockList), err
}
// getBlockListPreparer prepares the GetBlockList request.
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
params.Set("blocklisttype", string(listType))
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "blocklist")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getBlockListResponder handles the response to the GetBlockList request.
func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &BlockList{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
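// Illustrative sketch (ours): reading back the committed block list. The
// helper is hypothetical; BlockListCommitted and the CommittedBlocks field of
// BlockList are assumed from this package's generated enums and models.
func exampleGetBlockList(ctx context.Context, client blockBlobClient) ([]Block, error) {
	list, err := client.GetBlockList(ctx, BlockListCommitted, nil, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	return list.CommittedBlocks, nil
}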
// StageBlock the Stage Block operation creates a new block to be committed as part of a blob
//
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. body is the initial data; the body will be
// closed upon successful return. Callers should ensure closure when receiving an error. timeout is the timeout parameter is
// expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockPreparer(blockID, contentLength, body, timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockBlobStageBlockResponse), err
}
// stageBlockPreparer prepares the StageBlock request.
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
params.Set("blockid", blockID)
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "block")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// stageBlockResponder handles the response to the StageBlock request.
func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err
}
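// Illustrative sketch (ours): staging one block. The service requires block
// IDs to be Base64 and equal-length within a blob, so a zero-padded decimal
// counter is encoded here; the helper and the ID scheme are hypothetical.
func exampleStageBlock(ctx context.Context, client blockBlobClient, n int64, data []byte) (string, error) {
	padded := "0000000000" + strconv.FormatInt(n, 10)
	id := base64.StdEncoding.EncodeToString([]byte(padded[len(padded)-10:]))
	_, err := client.StageBlock(ctx, id, int64(len(data)), bytes.NewReader(data), nil, nil, nil)
	return id, err
}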
// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents
// are read from a URL.
//
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy
// source. sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the MD5 calculated
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
// seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockBlobStageBlockFromURLResponse), err
}
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
params.Set("blockid", blockID)
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "block")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if sourceURL != nil {
req.Header.Set("x-ms-copy-source", *sourceURL)
}
if sourceRange != nil {
req.Header.Set("x-ms-source-range", *sourceRange)
}
if sourceContentMD5 != nil {
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err
}
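// Illustrative sketch (ours): staging a block whose bytes the service reads
// from another URL (public or SAS-authenticated). Under the Put Block From URL
// REST semantics the request itself carries no body, so contentLength is
// assumed to be 0 and the bytes to copy are named by the source range header;
// the helper and its placeholder values are hypothetical.
func exampleStageBlockFromURL(ctx context.Context, client blockBlobClient, id, src string, n int64) error {
	srcRange := "bytes=0-" + strconv.FormatInt(n-1, 10)
	_, err := client.StageBlockFromURL(ctx, id, 0, &src, &srcRange, nil, nil, nil, nil)
	return err
}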
// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
// block blob, use the Put Block List operation.
//
// body is the initial data; the body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockBlobUploadResponse), err
}
// uploadPreparer prepares the Upload request.
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-blob-type", "BlockBlob")
return req, nil
}
// uploadResponder handles the response to the Upload request.
func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err
}
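// Illustrative sketch (ours): single-shot Put Blob of an in-memory payload
// with an explicit content type; every other optional parameter stays nil.
// The helper name and payload are hypothetical.
func exampleUpload(ctx context.Context, client blockBlobClient, data []byte) error {
	contentType := "application/octet-stream"
	_, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)), nil, &contentType, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}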

View File

@ -0,0 +1,38 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/Azure/azure-pipeline-go/pipeline"
"net/url"
)
const (
// ServiceVersion specifies the version of the operations used in this package.
ServiceVersion = "2018-03-28"
)
// managementClient is the base client for Azblob.
type managementClient struct {
url url.URL
p pipeline.Pipeline
}
// newManagementClient creates an instance of the managementClient client.
func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient {
return managementClient{
url: url,
p: p,
}
}
// URL returns a copy of the URL for this client.
func (mc managementClient) URL() url.URL {
return mc.url
}
// Pipeline returns the pipeline for this client.
func (mc managementClient) Pipeline() pipeline.Pipeline {
return mc.p
}
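// Illustrative sketch (ours): every operation group in this package wraps
// managementClient exactly like the generated constructors do; a hypothetical
// caller can build one straight from a raw URL and a configured pipeline.
func exampleNewClient(rawURL string, p pipeline.Pipeline) (managementClient, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return managementClient{}, err
	}
	return newManagementClient(*u, p), nil
}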

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,794 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/base64"
"encoding/xml"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
// pageBlobClient is the client for the PageBlob methods of the Azblob service.
type pageBlobClient struct {
managementClient
}
// newPageBlobClient creates an instance of the pageBlobClient client.
func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
return pageBlobClient{newManagementClient(url, p)}
}
// ClearPages the Clear Pages operation clears a set of pages from a page blob
//
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobClearPagesResponse), err
}
// clearPagesPreparer prepares the ClearPages request.
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "page")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
if ifSequenceNumberLessThan != nil {
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
}
if ifSequenceNumberEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-page-write", "clear")
return req, nil
}
// clearPagesResponder handles the response to the ClearPages request.
func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err
}
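// Illustrative sketch (ours): clearing one aligned page range. Clear Pages
// sends no body, so contentLength is 0 and the span rides in x-ms-range;
// offset and count are assumed to be multiples of 512. Helper is hypothetical.
func exampleClearPages(ctx context.Context, client pageBlobClient, offset, count int64) error {
	r := "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
	_, err := client.ClearPages(ctx, 0, nil, &r, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}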
// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
// The snapshot is copied such that only the differential changes since the previously copied snapshot are
// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or
// copied from as usual. This API is supported since REST version 2016-05-31.
//
// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
// expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> metadata is optional. Specifies a user-defined name-value pair associated
// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.copyIncrementalPreparer(copySource, timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobCopyIncrementalResponse), err
}
// copyIncrementalPreparer prepares the CopyIncremental request.
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "incrementalcopy")
req.URL.RawQuery = params.Encode()
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-copy-source", copySource)
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// copyIncrementalResponder handles the response to the CopyIncremental request.
func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err
}
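// Illustrative sketch (ours): starting an incremental snapshot copy. The
// responder above accepts 202 Accepted, so the copy runs asynchronously and
// completion has to be observed via the destination blob's copy status; the
// snapshot URL is a caller-supplied placeholder.
func exampleCopyIncremental(ctx context.Context, client pageBlobClient, snapshotURL string) error {
	_, err := client.CopyIncremental(ctx, snapshotURL, nil, nil, nil, nil, nil, nil, nil)
	return err
}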
// Create the Create operation creates a new page blob.
//
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentLength is this
// header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte
// boundary. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentLength, blobSequenceNumber, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobCreateResponse), err
}
// createPreparer prepares the Create request.
func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if blobContentLength != nil {
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(*blobContentLength, 10))
}
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-blob-type", "PageBlob")
return req, nil
}
// createResponder handles the response to the Create request.
func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobCreateResponse{rawResponse: resp.Response()}, err
}
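// Illustrative sketch (ours): creating an empty 1 MiB page blob. The request
// body is empty (contentLength 0); the blob's size travels in blobContentLength
// and must be 512-byte aligned. The helper name is hypothetical.
func examplePageBlobCreate(ctx context.Context, client pageBlobClient) error {
	size := int64(1024 * 1024) // a multiple of 512, as required
	_, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &size, nil, nil)
	return err
}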
// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a
// page blob
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageList), err
}
// getPageRangesPreparer prepares the GetPageRanges request.
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "pagelist")
req.URL.RawQuery = params.Encode()
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getPageRangesResponder handles the response to the GetPageRanges request.
func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &PageList{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
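// Illustrative sketch (ours): listing valid ranges for the whole blob and
// summing the allocated bytes. The PageRange slice and its inclusive Start/End
// fields are assumed from this package's generated models.
func exampleGetPageRanges(ctx context.Context, client pageBlobClient) (int64, error) {
	list, err := client.GetPageRanges(ctx, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return 0, err
	}
	var used int64
	for _, pr := range list.PageRange {
		used += pr.End - pr.Start + 1 // End is inclusive
	}
	return used, nil
}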
// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
// that were changed between the target blob and a previous snapshot.
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageList), err
}
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
if prevsnapshot != nil && len(*prevsnapshot) > 0 {
params.Set("prevsnapshot", *prevsnapshot)
}
params.Set("comp", "pagelist")
req.URL.RawQuery = params.Encode()
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &PageList{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
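// Illustrative sketch (ours): diffing the live blob against an earlier
// snapshot. prevsnapshot carries the snapshot's opaque DateTime token; all
// other optional parameters stay nil. The helper is hypothetical.
func exampleGetPageRangesDiff(ctx context.Context, client pageBlobClient, prev string) (*PageList, error) {
	return client.GetPageRangesDiff(ctx, nil, nil, &prev, nil, nil, nil, nil, nil, nil, nil)
}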
// Resize resizes the blob
//
// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
// see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
// analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobResizeResponse), err
}
// resizePreparer prepares the Resize request.
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// resizeResponder handles the response to the Resize request.
func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobResizeResponse{rawResponse: resp.Response()}, err
}
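// Illustrative sketch (ours): growing a page blob while honoring the 512-byte
// alignment the x-ms-blob-content-length header requires; the round-up policy
// and helper name are ours.
func exampleResize(ctx context.Context, client pageBlobClient, newLen int64) error {
	if rem := newLen % 512; rem != 0 {
		newLen += 512 - rem // round up to the next page boundary
	}
	_, err := client.Resize(ctx, newLen, nil, nil, nil, nil, nil, nil, nil)
	return err
}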
// UpdateSequenceNumber updates the sequence number of the blob
//
// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number.
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobSequenceNumber, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobUpdateSequenceNumberResponse), err
}
// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction))
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request.
func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err
}
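// Illustrative sketch (ours): pinning an explicit sequence number.
// SequenceNumberActionUpdate is assumed to be one of the generated
// SequenceNumberActionType constants, alongside the max and increment actions.
func exampleSetSequenceNumber(ctx context.Context, client pageBlobClient, n int64) error {
	_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, nil, nil, nil, nil, nil, nil, &n, nil)
	return err
}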
// UploadPages the Upload Pages operation writes a range of pages to a page blob
//
// body is the initial data; the body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPagesPreparer(body, contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobUploadPagesResponse), err
}
// uploadPagesPreparer prepares the UploadPages request.
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "page")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
if ifSequenceNumberLessThan != nil {
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
}
if ifSequenceNumberEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-page-write", "update")
return req, nil
}
// uploadPagesResponder handles the response to the UploadPages request.
func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
}
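// Illustrative sketch for UploadPages: write a single 512-byte page at offset
// zero. The "bytes=start-end" string matches the x-ms-range header set by the
// preparer above, and bytes.NewReader satisfies the io.ReadSeeker body (the
// bytes and fmt imports are assumed available).
func uploadFirstPage(ctx context.Context, client pageBlobClient, page []byte) error {
	rng := fmt.Sprintf("bytes=0-%d", len(page)-1) // page blobs need 512-byte-aligned ranges
	_, err := client.UploadPages(ctx, bytes.NewReader(page), int64(len(page)), nil, &rng,
		nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}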


@ -0,0 +1,74 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"bytes"
"context"
"encoding/xml"
"github.com/Azure/azure-pipeline-go/pipeline"
"io/ioutil"
)
type responder func(resp pipeline.Response) (result pipeline.Response, err error)
// responderPolicyFactory is a factory capable of creating responder policies.
type responderPolicyFactory struct {
responder responder
}
// New creates a responder policy.
func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return responderPolicy{next: next, responder: arpf.responder}
}
type responderPolicy struct {
next pipeline.Policy
responder responder
}
// Do sends the request to the service and validates/deserializes the HTTP response.
func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
resp, err := arp.next.Do(ctx, request)
if err != nil {
return resp, err
}
return arp.responder(resp)
}
// validateResponse checks an HTTP response's status code against a legal set of codes.
// If the response code is not legal, then validateResponse reads all of the response's body
// (containing error information) and returns a response error.
func validateResponse(resp pipeline.Response, successStatusCodes ...int) error {
if resp == nil {
return NewResponseError(nil, nil, "nil response")
}
responseCode := resp.Response().StatusCode
for _, i := range successStatusCodes {
if i == responseCode {
return nil
}
}
// only close the body in the failure case. in the
// success case responders will close the body as required.
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return NewResponseError(err, resp.Response(), "failed to read response body")
}
// the service code, description and details will be populated during unmarshalling
responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
if len(b) > 0 {
if err = xml.Unmarshal(b, &responseError); err != nil {
return NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return responseError
}
// removes any BOM from the byte slice
func removeBOM(b []byte) []byte {
// UTF8
return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
}
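// Illustrative sketch: because validateResponse only needs the Response()
// accessor, its contract can be exercised with a stub. The stub type and the
// net/http and strings imports are illustration-only assumptions; with an
// empty body the returned ResponseError carries just the HTTP status.
type stubResponse struct{ raw *http.Response }

func (s stubResponse) Response() *http.Response { return s.raw }

func exampleValidate() error {
	raw := &http.Response{
		StatusCode: http.StatusNotFound,
		Status:     "404 Not Found",
		Body:       ioutil.NopCloser(strings.NewReader("")),
	}
	// 404 is not in the allowed set, so an error is returned.
	return validateResponse(stubResponse{raw: raw}, http.StatusOK, http.StatusCreated)
}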


@ -0,0 +1,95 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"bytes"
"fmt"
"github.com/Azure/azure-pipeline-go/pipeline"
"net"
"net/http"
)
// If you want to provide custom error handling, set this variable to your constructor function.
var responseErrorFactory func(cause error, response *http.Response, description string) error
// ResponseError identifies a responder-generated network or response parsing error.
type ResponseError interface {
// Error exposes the Error(), Temporary() and Timeout() methods.
net.Error // Includes the Go error interface
// Response returns the HTTP response. You may examine this but you should not modify it.
Response() *http.Response
}
// NewResponseError creates an error object that implements the error interface.
func NewResponseError(cause error, response *http.Response, description string) error {
if responseErrorFactory != nil {
return responseErrorFactory(cause, response, description)
}
return &responseError{
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
response: response,
description: description,
}
}
// responseError is the internal struct that implements the public ResponseError interface.
type responseError struct {
pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause
response *http.Response
description string
}
// Error implements the error interface's Error method to return a string representation of the error.
func (e *responseError) Error() string {
b := &bytes.Buffer{}
fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode)
fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description)
s := b.String()
return e.ErrorNode.Error(s)
}
// Response implements the ResponseError interface's method to return the HTTP response.
func (e *responseError) Response() *http.Response {
return e.response
}
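// Illustrative sketch: callers can recover the raw HTTP response from any
// returned error by asserting the ResponseError interface; Temporary() comes
// from the embedded net.Error.
func logIfThrottled(err error) {
	if respErr, ok := err.(ResponseError); ok {
		if resp := respErr.Response(); resp != nil && resp.StatusCode == http.StatusServiceUnavailable {
			fmt.Printf("throttled (retryable=%v): %v\n", respErr.Temporary(), respErr)
		}
	}
}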
// RFC7807 PROBLEM ------------------------------------------------------------------------------------
// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members.
/*type RFC7807Problem struct {
// Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation).
typeURI string // Should default to "about:blank"
// Optional: Short, human-readable summary (maybe localized).
title string
// Optional: HTTP status code generated by the origin server
status int
// Optional: Human-readable explanation for this problem occurrence.
// Should help client correct the problem. Clients should NOT parse this string.
detail string
// Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
instance string
}
// NewRFC7807Problem ...
func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
return &RFC7807Problem{
typeURI: typeURI,
status: status,
title: fmt.Sprintf(titleFormat, a...),
}
}
// Error returns the error information as a string.
func (e *RFC7807Problem) Error() string {
return e.title
}
// TypeURI ...
func (e *RFC7807Problem) TypeURI() string {
if e.typeURI == "" {
e.typeURI = "about:blank"
}
return e.typeURI
}
// Members ...
func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
return e.status, e.title, e.detail, e.instance
}*/


@ -0,0 +1,350 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"bytes"
"context"
"encoding/xml"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// serviceClient is the client for the Service methods of the Azblob service.
type serviceClient struct {
managementClient
}
// newServiceClient creates an instance of the serviceClient client.
func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
return serviceClient{newManagementClient(url, p)}
}
// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPropertiesPreparer(timeout, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*StorageServiceProperties), err
}
// getPropertiesPreparer prepares the GetProperties request.
func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("restype", "service")
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getPropertiesResponder handles the response to the GetProperties request.
func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &StorageServiceProperties{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
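// Illustrative sketch: wire the pieces above together, assuming a
// pipeline.Pipeline has already been built (for instance from a shared key
// credential). Nil timeout and request ID select the service defaults.
func fetchServiceProperties(ctx context.Context, u url.URL, p pipeline.Pipeline) (*StorageServiceProperties, error) {
	client := newServiceClient(u, p)
	return client.GetProperties(ctx, nil, nil)
}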
// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the
// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getStatisticsPreparer(timeout, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req)
if err != nil {
return nil, err
}
return resp.(*StorageServiceStats), err
}
// getStatisticsPreparer prepares the GetStatistics request.
func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("restype", "service")
params.Set("comp", "stats")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getStatisticsResponder handles the response to the GetStatistics request.
func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &StorageServiceStats{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified
// account
//
// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a
// string value that identifies the portion of the list of containers to be returned with the next listing operation.
// The operation returns the NextMarker value within the response body if the listing operation did not return all
// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the
// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the
// client. maxresults is specifies the maximum number of containers to return. If the request does not specify
// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the
// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the
// remainder of the results. For this reason, it is possible that the service will return fewer results than specified
// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's
// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For
// more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersResponse, error) {
if err := validate([]validation{
{targetValue: maxresults,
constraints: []constraint{{target: "maxresults", name: null, rule: false,
chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ListContainersResponse), err
}
// listContainersSegmentPreparer prepares the ListContainersSegment request.
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if prefix != nil && len(*prefix) > 0 {
params.Set("prefix", *prefix)
}
if marker != nil && len(*marker) > 0 {
params.Set("marker", *marker)
}
if maxresults != nil {
params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
}
if include != ListContainersIncludeNone {
params.Set("include", string(include))
}
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "list")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// listContainersSegmentResponder handles the response to the ListContainersSegment request.
func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &ListContainersResponse{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
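// Illustrative sketch: the NextMarker handshake described above reduces to a
// loop feeding each response's continuation token into the next request. The
// ContainerItems and NextMarker field names are assumptions about the
// generated ListContainersResponse model.
func listAllContainerNames(ctx context.Context, client serviceClient) ([]string, error) {
	var names []string
	var marker *string
	for {
		resp, err := client.ListContainersSegment(ctx, nil, marker, nil, ListContainersIncludeNone, nil, nil)
		if err != nil {
			return nil, err
		}
		for _, c := range resp.ContainerItems { // assumed field name
			names = append(names, c.Name) // assumed field name
		}
		if resp.NextMarker == nil || *resp.NextMarker == "" { // assumed field name
			return names, nil
		}
		marker = resp.NextMarker
	}
}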
// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage
// Analytics and CORS (Cross-Origin Resource Sharing) rules
//
// storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds.
// For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) {
if err := validate([]validation{
{targetValue: storageServiceProperties,
constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true,
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
}},
}},
{target: "storageServiceProperties.HourMetrics", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
}},
}},
{target: "storageServiceProperties.MinuteMetrics", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
}},
}},
{target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false,
chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
}}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ServiceSetPropertiesResponse), err
}
// setPropertiesPreparer prepares the SetProperties request.
func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("restype", "service")
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
b, err := xml.Marshal(storageServiceProperties)
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
}
req.Header.Set("Content-Type", "application/xml")
err = req.SetBody(bytes.NewReader(b))
if err != nil {
return req, pipeline.NewError(err, "failed to set request body")
}
return req, nil
}
// setPropertiesResponder handles the response to the SetProperties request.
func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
}
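// Illustrative sketch: the validation chain above encodes the service rule
// that an enabled retention policy must keep data for at least one day. The
// StorageServiceProperties and RetentionPolicy field names below are
// assumptions about the generated models.
func enableDeleteRetention(ctx context.Context, client serviceClient, days int32) error {
	props := StorageServiceProperties{
		DeleteRetentionPolicy: &RetentionPolicy{ // assumed model shape
			Enabled: true,
			Days:    &days, // must be >= 1 to satisfy the inclusiveMinimum constraint above
		},
	}
	_, err := client.SetProperties(ctx, props, nil, nil)
	return err
}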


@ -0,0 +1,367 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"fmt"
"github.com/Azure/azure-pipeline-go/pipeline"
"reflect"
"regexp"
"strings"
)
// Constraint stores constraint name, target field name
// Rule and chain validations.
type constraint struct {
// Target field name for validation.
target string
// Constraint name e.g. minLength, MaxLength, Pattern, etc.
name string
// Rule for constraint e.g. greater than 10, less than 5 etc.
rule interface{}
// Chain validations for struct type
chain []constraint
}
// Validation stores parameter-wise validation.
type validation struct {
targetValue interface{}
constraints []constraint
}
// Constraint list
const (
empty = "Empty"
null = "Null"
readOnly = "ReadOnly"
pattern = "Pattern"
maxLength = "MaxLength"
minLength = "MinLength"
maxItems = "MaxItems"
minItems = "MinItems"
multipleOf = "MultipleOf"
uniqueItems = "UniqueItems"
inclusiveMaximum = "InclusiveMaximum"
exclusiveMaximum = "ExclusiveMaximum"
exclusiveMinimum = "ExclusiveMinimum"
inclusiveMinimum = "InclusiveMinimum"
)
// Validate method validates constraints on parameter
// passed in validation array.
func validate(m []validation) error {
for _, item := range m {
v := reflect.ValueOf(item.targetValue)
for _, constraint := range item.constraints {
var err error
switch v.Kind() {
case reflect.Ptr:
err = validatePtr(v, constraint)
case reflect.String:
err = validateString(v, constraint)
case reflect.Struct:
err = validateStruct(v, constraint)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
err = validateInt(v, constraint)
case reflect.Float32, reflect.Float64:
err = validateFloat(v, constraint)
case reflect.Array, reflect.Slice, reflect.Map:
err = validateArrayMap(v, constraint)
default:
err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
}
if err != nil {
return err
}
}
}
return nil
}
func validateStruct(x reflect.Value, v constraint, name ...string) error {
// Get the field name from the target name, which is in the format a.b.c
s := strings.Split(v.target, ".")
f := x.FieldByName(s[len(s)-1])
if isZero(f) {
return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target))
}
err := validate([]validation{
{
targetValue: getInterfaceValue(f),
constraints: []constraint{v},
},
})
return err
}
func validatePtr(x reflect.Value, v constraint) error {
if v.name == readOnly {
if !x.IsNil() {
return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
}
return nil
}
if x.IsNil() {
return checkNil(x, v)
}
if v.chain != nil {
return validate([]validation{
{
targetValue: getInterfaceValue(x.Elem()),
constraints: v.chain,
},
})
}
return nil
}
func validateInt(x reflect.Value, v constraint) error {
i := x.Int()
r, ok := v.rule.(int)
if !ok {
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
}
switch v.name {
case multipleOf:
if i%int64(r) != 0 {
return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
}
case exclusiveMinimum:
if i <= int64(r) {
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
}
case exclusiveMaximum:
if i >= int64(r) {
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
}
case inclusiveMinimum:
if i < int64(r) {
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
}
case inclusiveMaximum:
if i > int64(r) {
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
}
default:
return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name))
}
return nil
}
func validateFloat(x reflect.Value, v constraint) error {
f := x.Float()
r, ok := v.rule.(float64)
if !ok {
return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule))
}
switch v.name {
case exclusiveMinimum:
if f <= r {
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
}
case exclusiveMaximum:
if f >= r {
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
}
case inclusiveMinimum:
if f < r {
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
}
case inclusiveMaximum:
if f > r {
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
}
default:
return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name))
}
return nil
}
func validateString(x reflect.Value, v constraint) error {
s := x.String()
switch v.name {
case empty:
if len(s) == 0 {
return checkEmpty(x, v)
}
case pattern:
reg, err := regexp.Compile(v.rule.(string))
if err != nil {
return createError(x, v, err.Error())
}
if !reg.MatchString(s) {
return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule))
}
case maxLength:
if _, ok := v.rule.(int); !ok {
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
}
if len(s) > v.rule.(int) {
return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule))
}
case minLength:
if _, ok := v.rule.(int); !ok {
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
}
if len(s) < v.rule.(int) {
return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule))
}
case readOnly:
if len(s) > 0 {
return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
}
default:
return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name))
}
if v.chain != nil {
return validate([]validation{
{
targetValue: getInterfaceValue(x),
constraints: v.chain,
},
})
}
return nil
}
func validateArrayMap(x reflect.Value, v constraint) error {
switch v.name {
case null:
if x.IsNil() {
return checkNil(x, v)
}
case empty:
if x.IsNil() || x.Len() == 0 {
return checkEmpty(x, v)
}
case maxItems:
if _, ok := v.rule.(int); !ok {
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
}
if x.Len() > v.rule.(int) {
return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len()))
}
case minItems:
if _, ok := v.rule.(int); !ok {
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
}
if x.Len() < v.rule.(int) {
return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len()))
}
case uniqueItems:
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
if !checkForUniqueInArray(x) {
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
}
} else if x.Kind() == reflect.Map {
if !checkForUniqueInMap(x) {
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
}
} else {
return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind()))
}
case readOnly:
if x.Len() != 0 {
return createError(x, v, "readonly parameter; must send as nil or empty in request")
}
case pattern:
reg, err := regexp.Compile(v.rule.(string))
if err != nil {
return createError(x, v, err.Error())
}
keys := x.MapKeys()
for _, k := range keys {
if !reg.MatchString(k.String()) {
return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule))
}
}
default:
return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name))
}
if v.chain != nil {
return validate([]validation{
{
targetValue: getInterfaceValue(x),
constraints: v.chain,
},
})
}
return nil
}
func checkNil(x reflect.Value, v constraint) error {
if _, ok := v.rule.(bool); !ok {
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
}
if v.rule.(bool) {
return createError(x, v, "value can not be null; required parameter")
}
return nil
}
func checkEmpty(x reflect.Value, v constraint) error {
if _, ok := v.rule.(bool); !ok {
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
}
if v.rule.(bool) {
return createError(x, v, "value can not be null or empty; required parameter")
}
return nil
}
func checkForUniqueInArray(x reflect.Value) bool {
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
return false
}
arrOfInterface := make([]interface{}, x.Len())
for i := 0; i < x.Len(); i++ {
arrOfInterface[i] = x.Index(i).Interface()
}
m := make(map[interface{}]bool)
for _, val := range arrOfInterface {
if m[val] {
return false
}
m[val] = true
}
return true
}
func checkForUniqueInMap(x reflect.Value) bool {
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
return false
}
mapOfInterface := make(map[interface{}]interface{}, x.Len())
keys := x.MapKeys()
for _, k := range keys {
mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
}
m := make(map[interface{}]bool)
for _, val := range mapOfInterface {
if m[val] {
return false
}
m[val] = true
}
return true
}
func getInterfaceValue(x reflect.Value) interface{} {
if x.Kind() == reflect.Invalid {
return nil
}
return x.Interface()
}
func isZero(x interface{}) bool {
return x == reflect.Zero(reflect.TypeOf(x)).Interface()
}
func createError(x reflect.Value, v constraint, message string) error {
return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s",
v.target, v.name, getInterfaceValue(x), message))
}
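// Illustrative sketch: the same machinery that guards every generated
// operation works standalone. This mirrors the timeout constraint used
// throughout the package: nil passes because the null rule is false, while a
// present value must be >= 0.
func checkTimeout(timeout *int32) error {
	return validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
}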


@ -0,0 +1,14 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/0.0.0 azblob/2018-03-28"
}
// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
return "0.0.0"
}


@ -0,0 +1,242 @@
package azblob
import (
"context"
"io"
"net/http"
"time"
)
// BlobHTTPHeaders contains read/writeable blob properties.
type BlobHTTPHeaders struct {
ContentType string
ContentMD5 []byte
ContentEncoding string
ContentLanguage string
ContentDisposition string
CacheControl string
}
// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
ContentType: bgpr.ContentType(),
ContentEncoding: bgpr.ContentEncoding(),
ContentLanguage: bgpr.ContentLanguage(),
ContentDisposition: bgpr.ContentDisposition(),
CacheControl: bgpr.CacheControl(),
ContentMD5: bgpr.ContentMD5(),
}
}
///////////////////////////////////////////////////////////////////////////////
// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
ContentType: dr.ContentType(),
ContentEncoding: dr.ContentEncoding(),
ContentLanguage: dr.ContentLanguage(),
ContentDisposition: dr.ContentDisposition(),
CacheControl: dr.CacheControl(),
ContentMD5: dr.ContentMD5(),
}
}
///////////////////////////////////////////////////////////////////////////////
// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry.
type DownloadResponse struct {
r *downloadResponse
ctx context.Context
b BlobURL
getInfo HTTPGetterInfo
}
// Body constructs a new RetryReader stream for reading data. If a connection fails
// while reading, it will make additional requests to reestablish a connection and
// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
// (the default) returns the original response body and no retries will be performed.
func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
if o.MaxRetryRequests == 0 { // No additional retries
return r.Response().Body
}
return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
BlobAccessConditions{
HTTPAccessConditions: HTTPAccessConditions{IfMatch: getInfo.ETag},
},
false)
if err != nil {
return nil, err
}
return resp.Response(), err
},
)
}
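// Illustrative sketch: download a whole blob and stream it through the
// retrying body. The Download signature matches the retry callback above;
// that a zero count means "read to the end of the blob" is an assumption.
func downloadTo(ctx context.Context, blob BlobURL, w io.Writer) error {
	resp, err := blob.Download(ctx, 0, 0, BlobAccessConditions{}, false)
	if err != nil {
		return err
	}
	body := resp.Body(RetryReaderOptions{MaxRetryRequests: 3}) // up to 3 reconnects
	defer body.Close()
	_, err = io.Copy(w, body)
	return err
}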
// Response returns the raw HTTP response object.
func (r DownloadResponse) Response() *http.Response {
return r.r.Response()
}
// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
return r.r.NewHTTPHeaders()
}
// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
func (r DownloadResponse) BlobContentMD5() []byte {
return r.r.BlobContentMD5()
}
// ContentMD5 returns the value for header Content-MD5.
func (r DownloadResponse) ContentMD5() []byte {
return r.r.ContentMD5()
}
// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r DownloadResponse) StatusCode() int {
return r.r.StatusCode()
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r DownloadResponse) Status() string {
return r.r.Status()
}
// AcceptRanges returns the value for header Accept-Ranges.
func (r DownloadResponse) AcceptRanges() string {
return r.r.AcceptRanges()
}
// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
func (r DownloadResponse) BlobCommittedBlockCount() int32 {
return r.r.BlobCommittedBlockCount()
}
// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
func (r DownloadResponse) BlobSequenceNumber() int64 {
return r.r.BlobSequenceNumber()
}
// BlobType returns the value for header x-ms-blob-type.
func (r DownloadResponse) BlobType() BlobType {
return r.r.BlobType()
}
// CacheControl returns the value for header Cache-Control.
func (r DownloadResponse) CacheControl() string {
return r.r.CacheControl()
}
// ContentDisposition returns the value for header Content-Disposition.
func (r DownloadResponse) ContentDisposition() string {
return r.r.ContentDisposition()
}
// ContentEncoding returns the value for header Content-Encoding.
func (r DownloadResponse) ContentEncoding() string {
return r.r.ContentEncoding()
}
// ContentLanguage returns the value for header Content-Language.
func (r DownloadResponse) ContentLanguage() string {
return r.r.ContentLanguage()
}
// ContentLength returns the value for header Content-Length.
func (r DownloadResponse) ContentLength() int64 {
return r.r.ContentLength()
}
// ContentRange returns the value for header Content-Range.
func (r DownloadResponse) ContentRange() string {
return r.r.ContentRange()
}
// ContentType returns the value for header Content-Type.
func (r DownloadResponse) ContentType() string {
return r.r.ContentType()
}
// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
func (r DownloadResponse) CopyCompletionTime() time.Time {
return r.r.CopyCompletionTime()
}
// CopyID returns the value for header x-ms-copy-id.
func (r DownloadResponse) CopyID() string {
return r.r.CopyID()
}
// CopyProgress returns the value for header x-ms-copy-progress.
func (r DownloadResponse) CopyProgress() string {
return r.r.CopyProgress()
}
// CopySource returns the value for header x-ms-copy-source.
func (r DownloadResponse) CopySource() string {
return r.r.CopySource()
}
// CopyStatus returns the value for header x-ms-copy-status.
func (r DownloadResponse) CopyStatus() CopyStatusType {
return r.r.CopyStatus()
}
// CopyStatusDescription returns the value for header x-ms-copy-status-description.
func (r DownloadResponse) CopyStatusDescription() string {
return r.r.CopyStatusDescription()
}
// Date returns the value for header Date.
func (r DownloadResponse) Date() time.Time {
return r.r.Date()
}
// ETag returns the value for header ETag.
func (r DownloadResponse) ETag() ETag {
return r.r.ETag()
}
// IsServerEncrypted returns the value for header x-ms-server-encrypted.
func (r DownloadResponse) IsServerEncrypted() string {
return r.r.IsServerEncrypted()
}
// LastModified returns the value for header Last-Modified.
func (r DownloadResponse) LastModified() time.Time {
return r.r.LastModified()
}
// LeaseDuration returns the value for header x-ms-lease-duration.
func (r DownloadResponse) LeaseDuration() LeaseDurationType {
return r.r.LeaseDuration()
}
// LeaseState returns the value for header x-ms-lease-state.
func (r DownloadResponse) LeaseState() LeaseStateType {
return r.r.LeaseState()
}
// LeaseStatus returns the value for header x-ms-lease-status.
func (r DownloadResponse) LeaseStatus() LeaseStatusType {
return r.r.LeaseStatus()
}
// RequestID returns the value for header x-ms-request-id.
func (r DownloadResponse) RequestID() string {
return r.r.RequestID()
}
// Version returns the value for header x-ms-version.
func (r DownloadResponse) Version() string {
return r.r.Version()
}
// NewMetadata returns user-defined key/value pairs.
func (r DownloadResponse) NewMetadata() Metadata {
return r.r.NewMetadata()
}

vendor/github.com/Azure/azure-storage-blob-go/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,10 +0,0 @@
# Azure Storage SDK for Go
[![GoDoc](https://godoc.org/github.com/Azure/azure-storage-go?status.svg)](https://godoc.org/github.com/Azure/azure-storage-go) [![Build Status](https://travis-ci.org/Azure/azure-storage-go.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-storage-go)](https://goreportcard.com/report/github.com/Azure/azure-storage-go)
The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.
This package includes support for the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
# Contributing
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.


@ -1,223 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
import (
"bytes"
"fmt"
"net/url"
"sort"
"strings"
)
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
type authentication string
const (
sharedKey authentication = "sharedKey"
sharedKeyForTable authentication = "sharedKeyTable"
sharedKeyLite authentication = "sharedKeyLite"
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
// headers
headerAuthorization = "Authorization"
headerContentLength = "Content-Length"
headerDate = "Date"
headerXmsDate = "x-ms-date"
headerXmsVersion = "x-ms-version"
headerContentEncoding = "Content-Encoding"
headerContentLanguage = "Content-Language"
headerContentType = "Content-Type"
headerContentMD5 = "Content-MD5"
headerIfModifiedSince = "If-Modified-Since"
headerIfMatch = "If-Match"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerRange = "Range"
)
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
authHeader, err := c.getSharedKey(verb, url, headers, auth)
if err != nil {
return nil, err
}
headers[headerAuthorization] = authHeader
return headers, nil
}
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
canRes, err := c.buildCanonicalizedResource(url, auth)
if err != nil {
return "", err
}
canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
if err != nil {
return "", err
}
return c.createAuthorizationHeader(canString, auth), nil
}
func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := bytes.NewBufferString("/")
cr.WriteString(c.getCanonicalizedAccountName())
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
}
params, err := url.ParseQuery(u.RawQuery)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
if auth == sharedKey {
if len(params) > 0 {
cr.WriteString("\n")
keys := []string{}
for key := range params {
keys = append(keys, key)
}
sort.Strings(keys)
completeParams := []string{}
for _, key := range keys {
if len(params[key]) > 1 {
sort.Strings(params[key])
}
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
}
cr.WriteString(strings.Join(completeParams, "\n"))
}
} else {
// search for the "comp" parameter; if it exists, add it to the canonicalized resource
if v, ok := params["comp"]; ok {
cr.WriteString("?comp=" + v[0])
}
}
return string(cr.Bytes()), nil
}
func (c *Client) getCanonicalizedAccountName() string {
// since we may be trying to access a secondary storage account, we need to
// remove the -secondary part of the storage name
return strings.TrimSuffix(c.accountName, "-secondary")
}
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
contentLength := headers[headerContentLength]
if contentLength == "0" {
contentLength = ""
}
date := headers[headerDate]
if v, ok := headers[headerXmsDate]; ok {
if auth == sharedKey || auth == sharedKeyLite {
date = ""
} else {
date = v
}
}
var canString string
switch auth {
case sharedKey:
canString = strings.Join([]string{
verb,
headers[headerContentEncoding],
headers[headerContentLanguage],
contentLength,
headers[headerContentMD5],
headers[headerContentType],
date,
headers[headerIfModifiedSince],
headers[headerIfMatch],
headers[headerIfNoneMatch],
headers[headerIfUnmodifiedSince],
headers[headerRange],
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case sharedKeyForTable:
canString = strings.Join([]string{
verb,
headers[headerContentMD5],
headers[headerContentType],
date,
canonicalizedResource,
}, "\n")
case sharedKeyLite:
canString = strings.Join([]string{
verb,
headers[headerContentMD5],
headers[headerContentType],
date,
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case sharedKeyLiteForTable:
canString = strings.Join([]string{
date,
canonicalizedResource,
}, "\n")
default:
return "", fmt.Errorf("%s authentication is not supported yet", auth)
}
return canString, nil
}
func buildCanonicalizedHeader(headers map[string]string) string {
cm := make(map[string]string)
for k, v := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = v
}
}
if len(cm) == 0 {
return ""
}
keys := []string{}
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for _, key := range keys {
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(cm[key])
ch.WriteRune('\n')
}
return strings.TrimSuffix(string(ch.Bytes()), "\n")
}
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
signature := c.computeHmac256(canonicalizedString)
var key string
switch auth {
case sharedKey, sharedKeyForTable:
key = "SharedKey"
case sharedKeyLite, sharedKeyLiteForTable:
key = "SharedKeyLite"
}
return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
}
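// Illustrative sketch: computeHmac256 is not part of this diff, but the
// shared key scheme it presumably implements is the documented one: an
// HMAC-SHA256 of the canonicalized string, keyed with the base64-decoded
// account key and base64-encoded for the Authorization header. The
// crypto/hmac, crypto/sha256 and encoding/base64 imports are assumed.
func computeHmac256Sketch(accountKey []byte, canonicalizedString string) string {
	h := hmac.New(sha256.New, accountKey)
	h.Write([]byte(canonicalizedString))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}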

File diff suppressed because it is too large


@ -1,92 +0,0 @@
package storage
import (
"fmt"
"net/http"
"net/url"
)
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
client Client
auth authentication
}
// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
return b.client.getServiceProperties(blobServiceName, b.auth)
}
// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// GetContainerReference returns a Container object for the specified container name.
func (b BlobStorageClient) GetContainerReference(name string) Container {
return Container{
bsc: &b,
Name: name,
}
}
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
uri := b.client.getEndpoint(blobServiceName, "", q)
headers := b.client.getStandardHeaders()
var out ContainerListResponse
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
if err != nil {
return nil, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out)
// assign our client to the newly created Container objects
for i := range out.Containers {
out.Containers[i].bsc = &b
}
return &out, err
}
func (p ListContainersParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
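// Illustrative sketch of the listing API, tying the parameter struct to the
// query values built above; the "geth" prefix is purely an example value.
func listGethContainers(b BlobStorageClient) (*ContainerListResponse, error) {
	return b.ListContainers(ListContainersParameters{Prefix: "geth", MaxResults: 100})
}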


@ -1,479 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"runtime"
"strconv"
"strings"
"github.com/Azure/go-autorest/autorest/azure"
)
const (
// DefaultBaseURL is the domain name used for storage requests in the
// public cloud when a default client is created.
DefaultBaseURL = "core.windows.net"
// DefaultAPIVersion is the Azure Storage API version string used when a
// basic client is created.
DefaultAPIVersion = "2016-05-31"
defaultUseHTTPS = true
// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
StorageEmulatorAccountName = "devstoreaccount1"
// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
blobServiceName = "blob"
tableServiceName = "table"
queueServiceName = "queue"
fileServiceName = "file"
storageEmulatorBlob = "127.0.0.1:10000"
storageEmulatorTable = "127.0.0.1:10002"
storageEmulatorQueue = "127.0.0.1:10001"
userAgentHeader = "User-Agent"
)
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
// HTTPClient is the http.Client used to initiate API
// requests. If it is nil, http.DefaultClient is used.
HTTPClient *http.Client
accountName string
accountKey []byte
useHTTPS bool
UseSharedKeyLite bool
baseURL string
apiVersion string
userAgent string
}
type storageResponse struct {
statusCode int
headers http.Header
body io.ReadCloser
}
type odataResponse struct {
storageResponse
odata odataErrorMessage
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
QueryParameterName string `xml:"QueryParameterName"`
QueryParameterValue string `xml:"QueryParameterValue"`
Reason string `xml:"Reason"`
StatusCode int
RequestID string
}
type odataErrorMessageMessage struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type odataErrorMessageInternal struct {
Code string `json:"code"`
Message odataErrorMessageMessage `json:"message"`
}
type odataErrorMessage struct {
Err odataErrorMessageInternal `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int
got int
}
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
}
// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// NewBasicClient constructs a Client with the given storage account name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewBasicClientOnSovereignCloud constructs a Client with the given storage
// account name and key in the referenced cloud.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}
// NewEmulatorClient constructs a Client intended to only work with the Azure
// Storage Emulator.
func NewEmulatorClient() (Client, error) {
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a storage
// endpoint other than the Azure Public Cloud.
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
var c Client
if accountName == "" {
return c, fmt.Errorf("azure: account name required")
} else if accountKey == "" {
return c, fmt.Errorf("azure: account key required")
} else if blobServiceBaseURL == "" {
return c, fmt.Errorf("azure: base storage service url required")
}
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
}
c = Client{
accountName: accountName,
accountKey: key,
useHTTPS: useHTTPS,
baseURL: blobServiceBaseURL,
apiVersion: apiVersion,
UseSharedKeyLite: false,
}
c.userAgent = c.getDefaultUserAgent()
return c, nil
}
func (c Client) getDefaultUserAgent() string {
return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
sdkVersion,
c.apiVersion,
)
}
// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
if extension != "" {
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
return nil
}
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
}
// protectUserAgent is used in funcs that take extraheaders as a parameter.
// It prevents the User-Agent header from being overwritten; if one happens to
// be present, it is appended to the current User-Agent instead. Use it before
// calling getStandardHeaders.
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
if v, ok := extraheaders[userAgentHeader]; ok {
c.AddToUserAgent(v)
delete(extraheaders, userAgentHeader)
}
return extraheaders
}
func (c Client) getBaseURL(service string) string {
scheme := "http"
if c.useHTTPS {
scheme = "https"
}
host := ""
if c.accountName == StorageEmulatorAccountName {
switch service {
case blobServiceName:
host = storageEmulatorBlob
case tableServiceName:
host = storageEmulatorTable
case queueServiceName:
host = storageEmulatorQueue
}
} else {
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
}
u := &url.URL{
Scheme: scheme,
Host: host}
return u.String()
}
func (c Client) getEndpoint(service, path string, params url.Values) string {
u, err := url.Parse(c.getBaseURL(service))
if err != nil {
// really should not be happening
panic(err)
}
// API doesn't accept path segments not starting with '/'
if !strings.HasPrefix(path, "/") {
path = fmt.Sprintf("/%v", path)
}
if c.accountName == StorageEmulatorAccountName {
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
}
u.Path = path
u.RawQuery = params.Encode()
return u.String()
}
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
b := BlobStorageClient{
client: c,
}
b.client.AddToUserAgent(blobServiceName)
b.auth = sharedKey
if c.UseSharedKeyLite {
b.auth = sharedKeyLite
}
return b
}
// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
q := QueueServiceClient{
client: c,
}
q.client.AddToUserAgent(queueServiceName)
q.auth = sharedKey
if c.UseSharedKeyLite {
q.auth = sharedKeyLite
}
return q
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
t := TableServiceClient{
client: c,
}
t.client.AddToUserAgent(tableServiceName)
t.auth = sharedKeyForTable
if c.UseSharedKeyLite {
t.auth = sharedKeyLiteForTable
}
return t
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
f := FileServiceClient{
client: c,
}
f.client.AddToUserAgent(fileServiceName)
f.auth = sharedKey
if c.UseSharedKeyLite {
f.auth = sharedKeyLite
}
return f
}
func (c Client) getStandardHeaders() map[string]string {
return map[string]string{
userAgentHeader: c.userAgent,
"x-ms-version": c.apiVersion,
"x-ms-date": currentTimeRfc1123Formatted(),
}
}
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
if err != nil {
return nil, err
}
req, err := http.NewRequest(verb, url, body)
if err != nil {
return nil, errors.New("azure/storage: error creating request: " + err.Error())
}
if clstr, ok := headers["Content-Length"]; ok {
// content length header is being signed, but completely ignored by golang.
// instead we have to use the ContentLength property on the request struct
// (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
// https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
if err != nil {
return nil, err
}
}
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, err
}
requestID := resp.Header.Get("x-ms-request-id")
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
} else {
// response contains storage service error object, unmarshal
storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
if errIn != nil { // error unmarshaling the error response
err = errIn
} else {
err = storageErr
}
}
return &storageResponse{
statusCode: resp.StatusCode,
headers: resp.Header,
body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
}, err
}
return &storageResponse{
statusCode: resp.StatusCode,
headers: resp.Header,
body: resp.Body}, nil
}
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
if err != nil {
return nil, err
}
req, err := http.NewRequest(verb, url, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
respToRet := &odataResponse{}
respToRet.body = resp.Body
respToRet.statusCode = resp.StatusCode
respToRet.headers = resp.Header
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
return respToRet, err
}
// try unmarshal as odata.error json
err = json.Unmarshal(respBody, &respToRet.odata)
return respToRet, err
}
return respToRet, nil
}
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
out, err := ioutil.ReadAll(body)
if err == io.EOF {
err = nil
}
return out, err
}
func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
var storageErr AzureStorageServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err
}
storageErr.StatusCode = statusCode
storageErr.RequestID = requestID
return storageErr, nil
}
func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
return AzureStorageServiceError{
StatusCode: code,
Code: status,
RequestID: requestID,
Message: "no response body was available for error status code",
}
}
func (e AzureStorageServiceError) Error() string {
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
}
// checkRespCode returns UnexpectedStatusCodeError if the given response code
// is not one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
}
}
return UnexpectedStatusCodeError{allowed, respCode}
}
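Taken together, the constructors and service getters above were this package's entry points. A minimal sketch of the typical wiring, with a placeholder account name and base64 key rather than real credentials:

package main

import (
	"log"

	storage "github.com/Azure/azure-storage-go"
)

func main() {
	// Placeholder credentials; NewBasicClient targets the public cloud with
	// the default API version over HTTPS.
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	// Service-specific clients reuse the underlying Client configuration.
	blobs := client.GetBlobService()
	queues := client.GetQueueService()
	_, _ = blobs, queues
}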

View File

@ -1,376 +0,0 @@
package storage
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
// Container represents an Azure container.
type Container struct {
bsc *BlobStorageClient
Name string `xml:"Name"`
Properties ContainerProperties `xml:"Properties"`
}
func (c *Container) buildPath() string {
return fmt.Sprintf("/%s", c.Name)
}
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
LeaseStatus string `xml:"LeaseStatus"`
LeaseState string `xml:"LeaseState"`
LeaseDuration string `xml:"LeaseDuration"`
}
// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Containers []Container `xml:"Containers>Container"`
}
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Blobs []Blob `xml:"Blobs>Blob"`
// BlobPrefix is used to traverse blobs as if it were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
// The list here can be thought of as "folders" that may contain
// other folders or blobs.
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
// Delimiter is used to traverse blobs as if it were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
Delimiter string `xml:"Delimiter"`
}
// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
Prefix string
Delimiter string
Marker string
Include string
MaxResults uint
Timeout uint
}
func (p ListBlobsParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Delimiter != "" {
out.Set("delimiter", p.Delimiter)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string
// Access options for containers
const (
ContainerAccessTypePrivate ContainerAccessType = ""
ContainerAccessTypeBlob ContainerAccessType = "blob"
ContainerAccessTypeContainer ContainerAccessType = "container"
)
// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
ID string
StartTime time.Time
ExpiryTime time.Time
CanRead bool
CanWrite bool
CanDelete bool
}
// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
AccessType ContainerAccessType
AccessPolicies []ContainerAccessPolicy
}
// ContainerAccessHeader references header used when setting/getting container ACL
const (
ContainerAccessHeader string = "x-ms-blob-public-access"
)
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
func (c *Container) Create() error {
resp, err := c.create()
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists() (bool, error) {
resp, err := c.create()
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
return resp.statusCode == http.StatusCreated, nil
}
}
return false, err
}
func (c *Container) create() (*storageResponse, error) {
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
headers := c.bsc.client.getStandardHeaders()
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
}
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
headers := c.bsc.client.getStandardHeaders()
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, nil
}
}
return false, err
}
// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
params := url.Values{
"restype": {"container"},
"comp": {"acl"},
}
if timeout > 0 {
params.Add("timeout", strconv.Itoa(timeout))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
headers := c.bsc.client.getStandardHeaders()
if permissions.AccessType != "" {
headers[ContainerAccessHeader] = string(permissions.AccessType)
}
if leaseID != "" {
headers[headerLeaseID] = leaseID
}
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
if err != nil {
return err
}
headers["Content-Length"] = strconv.Itoa(length)
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return fmt.Errorf("unable to set permissions: %v", err)
}
return nil
}
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
params := url.Values{
"restype": {"container"},
"comp": {"acl"},
}
if timeout > 0 {
params.Add("timeout", strconv.Itoa(timeout))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
headers := c.bsc.client.getStandardHeaders()
if leaseID != "" {
headers[headerLeaseID] = leaseID
}
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return nil, err
}
defer resp.body.Close()
var ap AccessPolicy
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
if err != nil {
return nil, err
}
return buildAccessPolicy(ap, &resp.headers), nil
}
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
// containerAccess is one of "blob", "container", or empty (private)
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
permissions := ContainerPermissions{
AccessType: ContainerAccessType(containerAccess),
AccessPolicies: []ContainerAccessPolicy{},
}
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
capd := ContainerAccessPolicy{
ID: policy.ID,
StartTime: policy.AccessPolicy.StartTime,
ExpiryTime: policy.AccessPolicy.ExpiryTime,
}
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
}
return &permissions
}
// Delete deletes the container with given name on the storage
// account. Returns an error if the container does not exist.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
func (c *Container) Delete() error {
resp, err := c.delete()
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
func (c *Container) DeleteIfExists() (bool, error) {
resp, err := c.delete()
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
return false, err
}
func (c *Container) delete() (*storageResponse, error) {
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
headers := c.bsc.client.getStandardHeaders()
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
}
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{
"restype": {"container"},
"comp": {"list"}},
)
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
headers := c.bsc.client.getStandardHeaders()
var out BlobListResponse
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return out, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out)
return out, err
}
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
sil := SignedIdentifiers{
SignedIdentifiers: []SignedIdentifier{},
}
for _, capd := range policies {
permission := capd.generateContainerPermissions()
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
}
return xmlMarshal(sil)
}
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
// generate the permissions string (rwd).
// still want the end user API to have bool flags.
permissions = ""
if capd.CanRead {
permissions += "r"
}
if capd.CanWrite {
permissions += "w"
}
if capd.CanDelete {
permissions += "d"
}
return permissions
}
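As a sketch of how the listing types above compose: pagination is driven by NextMarker, which is fed back as the Marker parameter until it comes back empty. Here c is assumed to be a *Container obtained from the blob service (its accessor, like the Blob type itself, lives in another file of this package):

func listAllBlobs(c *Container) ([]Blob, error) {
	var all []Blob
	params := ListBlobsParameters{MaxResults: 1000}
	for {
		resp, err := c.ListBlobs(params)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Blobs...)
		if resp.NextMarker == "" {
			return all, nil
		}
		// Feed the continuation token back in for the next page.
		params.Marker = resp.NextMarker
	}
}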

View File

@ -1,217 +0,0 @@
package storage
import (
"encoding/xml"
"net/http"
"net/url"
)
// Directory represents a directory on a share.
type Directory struct {
fsc *FileServiceClient
Metadata map[string]string
Name string `xml:"Name"`
parent *Directory
Properties DirectoryProperties
share *Share
}
// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
}
// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type ListDirsAndFilesParameters struct {
Marker string
MaxResults uint
Timeout uint
}
// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type DirsAndFilesListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Marker string `xml:"Marker"`
MaxResults int64 `xml:"MaxResults"`
Directories []Directory `xml:"Entries>Directory"`
Files []File `xml:"Entries>File"`
NextMarker string `xml:"NextMarker"`
}
// builds the complete directory path for this directory object.
func (d *Directory) buildPath() string {
path := ""
current := d
for current.Name != "" {
path = "/" + current.Name + path
current = current.parent
}
return d.share.buildPath() + path
}
// Create this directory in the associated share.
// If a directory with the same name already exists, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
func (d *Directory) Create() error {
// if this is the root directory exit early
if d.parent == nil {
return nil
}
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
return nil
}
// CreateIfNotExists creates this directory under the associated share if the
// directory does not exist. Returns true if the directory is newly created or
// false if the directory already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
func (d *Directory) CreateIfNotExists() (bool, error) {
// if this is the root directory exit early
if d.parent == nil {
return false, nil
}
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
if resp.statusCode == http.StatusCreated {
d.updateEtagAndLastModified(resp.headers)
return true, nil
}
return false, d.FetchAttributes()
}
}
return false, err
}
// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
func (d *Directory) Delete() error {
return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
}
// DeleteIfExists removes this directory if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
func (d *Directory) DeleteIfExists() (bool, error) {
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
return false, err
}
// Exists returns true if this directory exists.
func (d *Directory) Exists() (bool, error) {
exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
if exists {
d.updateEtagAndLastModified(headers)
}
return exists, err
}
// FetchAttributes retrieves metadata for this directory.
func (d *Directory) FetchAttributes() error {
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
d.Metadata = getMetadataFromHeaders(headers)
return nil
}
// GetDirectoryReference returns a child Directory object for this directory.
func (d *Directory) GetDirectoryReference(name string) *Directory {
return &Directory{
fsc: d.fsc,
Name: name,
parent: d,
share: d.share,
}
}
// GetFileReference returns a child File object for this directory.
func (d *Directory) GetFileReference(name string) *File {
return &File{
fsc: d.fsc,
Name: name,
parent: d,
share: d.share,
}
}
// ListDirsAndFiles returns a list of files and directories under this directory.
// It also contains a pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
resp, err := d.fsc.listContent(d.buildPath(), q, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
var out DirsAndFilesListResponse
err = xmlUnmarshal(resp.body, &out)
return &out, err
}
// SetMetadata replaces the metadata for this directory.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetDirectoryMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
func (d *Directory) SetMetadata() error {
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
d.Properties.Etag = headers.Get("Etag")
d.Properties.LastModified = headers.Get("Last-Modified")
}
// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private and this method does not check if the directory exists.
func (d *Directory) URL() string {
return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}
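The reference getters and ListDirsAndFiles compose into a recursive walk. A sketch, assuming the starting *Directory came from the share's root-directory accessor (defined in the share file of this package):

func collectPaths(d *Directory, prefix string, out *[]string) error {
	params := ListDirsAndFilesParameters{}
	for {
		resp, err := d.ListDirsAndFiles(params)
		if err != nil {
			return err
		}
		for _, f := range resp.Files {
			*out = append(*out, prefix+f.Name)
		}
		for i := range resp.Directories {
			sub := d.GetDirectoryReference(resp.Directories[i].Name)
			if err := collectPaths(sub, prefix+sub.Name+"/", out); err != nil {
				return err
			}
		}
		if resp.NextMarker == "" {
			return nil
		}
		// Continue with the next page of this directory's listing.
		params.Marker = resp.NextMarker
	}
}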

View File

@ -1,412 +0,0 @@
package storage
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
const fourMB = uint64(4194304)
const oneTB = uint64(1099511627776)
// File represents a file on a share.
type File struct {
fsc *FileServiceClient
Metadata map[string]string
Name string `xml:"Name"`
parent *Directory
Properties FileProperties `xml:"Properties"`
share *Share
FileCopyProperties FileCopyState
}
// FileProperties contains various properties of a file.
type FileProperties struct {
CacheControl string `header:"x-ms-cache-control"`
Disposition string `header:"x-ms-content-disposition"`
Encoding string `header:"x-ms-content-encoding"`
Etag string
Language string `header:"x-ms-content-language"`
LastModified string
Length uint64 `xml:"Content-Length"`
MD5 string `header:"x-ms-content-md5"`
Type string `header:"x-ms-content-type"`
}
// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
CompletionTime string
ID string `header:"x-ms-copy-id"`
Progress string
Source string
Status string `header:"x-ms-copy-status"`
StatusDesc string
}
// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
Body io.ReadCloser
ContentMD5 string
}
// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but will expand.
type FileRequestOptions struct {
Timeout uint // timeout duration in seconds.
}
// getParameters constructs the URL parameters for FileRequestOptions;
// currently only timeout, but expected to grow as functionality fills out.
func (p FileRequestOptions) getParameters() url.Values {
out := url.Values{}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// FileRanges contains a list of file range information for a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRanges struct {
ContentLength uint64
LastModified string
ETag string
FileRanges []FileRange `xml:"Range"`
}
// FileRange contains range information for a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRange struct {
Start uint64 `xml:"Start"`
End uint64 `xml:"End"`
}
func (fr FileRange) String() string {
return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
}
// builds the complete file path for this file object
func (f *File) buildPath() string {
return f.parent.buildPath() + "/" + f.Name
}
// ClearRange releases the specified range of space in a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
func (f *File) ClearRange(fileRange FileRange) error {
headers, err := f.modifyRange(nil, fileRange, nil)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// Create creates a new file or replaces an existing one.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
func (f *File) Create(maxSize uint64) error {
if maxSize > oneTB {
return fmt.Errorf("max file size is 1TB")
}
extraHeaders := map[string]string{
"x-ms-content-length": strconv.FormatUint(maxSize, 10),
"x-ms-type": "file",
}
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
if err != nil {
return err
}
f.Properties.Length = maxSize
f.updateEtagAndLastModified(headers)
return nil
}
// CopyFile copies a file/blob from the sourceURL to the path provided.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
extraHeaders := map[string]string{
"x-ms-type": "file",
"x-ms-copy-source": sourceURL,
}
var parameters url.Values
if options != nil {
parameters = options.getParameters()
}
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
if err != nil {
return err
}
f.updateEtagLastModifiedAndCopyHeaders(headers)
return nil
}
// Delete immediately removes this file from the storage account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
func (f *File) Delete() error {
return f.fsc.deleteResource(f.buildPath(), resourceFile)
}
// DeleteIfExists removes this file if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
func (f *File) DeleteIfExists() (bool, error) {
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
return false, err
}
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
if getContentMD5 && isRangeTooBig(fileRange) {
return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
}
extraHeaders := map[string]string{
"Range": fileRange.String(),
}
if getContentMD5 {
extraHeaders["x-ms-range-get-content-md5"] = "true"
}
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
if err != nil {
return fs, err
}
if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
resp.body.Close()
return fs, err
}
fs.Body = resp.body
if getContentMD5 {
fs.ContentMD5 = resp.headers.Get("Content-MD5")
}
return fs, nil
}
// Exists returns true if this file exists.
func (f *File) Exists() (bool, error) {
exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
if exists {
f.updateEtagAndLastModified(headers)
f.updateProperties(headers)
}
return exists, err
}
// FetchAttributes updates metadata and properties for this file.
func (f *File) FetchAttributes() error {
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
f.updateProperties(headers)
f.Metadata = getMetadataFromHeaders(headers)
return nil
}
// returns true if the range is larger than 4MB
func isRangeTooBig(fileRange FileRange) bool {
return fileRange.End-fileRange.Start > fourMB
}
// ListRanges returns the list of valid ranges for this file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
params := url.Values{"comp": {"rangelist"}}
// add optional range to list
var headers map[string]string
if listRange != nil {
headers = make(map[string]string)
headers["Range"] = listRange.String()
}
resp, err := f.fsc.listContent(f.buildPath(), params, headers)
if err != nil {
return nil, err
}
defer resp.body.Close()
var cl uint64
cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
if err != nil {
ioutil.ReadAll(resp.body)
return nil, err
}
var out FileRanges
out.ContentLength = cl
out.ETag = resp.headers.Get("ETag")
out.LastModified = resp.headers.Get("Last-Modified")
err = xmlUnmarshal(resp.body, &out)
return &out, err
}
// modifies a range of bytes in this file
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
if err := f.fsc.checkForStorageEmulator(); err != nil {
return nil, err
}
if fileRange.End < fileRange.Start {
return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
}
if bytes != nil && isRangeTooBig(fileRange) {
return nil, errors.New("range cannot exceed 4MB in size")
}
uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})
// default to clear
write := "clear"
cl := uint64(0)
// if bytes is not nil then this is an update operation
if bytes != nil {
write = "update"
cl = (fileRange.End - fileRange.Start) + 1
}
extraHeaders := map[string]string{
"Content-Length": strconv.FormatUint(cl, 10),
"Range": fileRange.String(),
"x-ms-write": write,
}
if contentMD5 != nil {
extraHeaders["Content-MD5"] = *contentMD5
}
headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
if err != nil {
return nil, err
}
defer readAndCloseBody(resp.body)
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// SetMetadata replaces the metadata for this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
func (f *File) SetMetadata() error {
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// SetProperties sets system properties on this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetFileProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
func (f *File) SetProperties() error {
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (f *File) updateEtagAndLastModified(headers http.Header) {
f.Properties.Etag = headers.Get("Etag")
f.Properties.LastModified = headers.Get("Last-Modified")
}
// updates Etag, last modified date and x-ms-copy-id
func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
f.Properties.Etag = headers.Get("Etag")
f.Properties.LastModified = headers.Get("Last-Modified")
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
}
// updates file properties from the specified HTTP header
func (f *File) updateProperties(header http.Header) {
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
if err == nil {
f.Properties.Length = size
}
f.updateEtagAndLastModified(header)
f.Properties.CacheControl = header.Get("Cache-Control")
f.Properties.Disposition = header.Get("Content-Disposition")
f.Properties.Encoding = header.Get("Content-Encoding")
f.Properties.Language = header.Get("Content-Language")
f.Properties.MD5 = header.Get("Content-MD5")
f.Properties.Type = header.Get("Content-Type")
}
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
}
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
if bytes == nil {
return errors.New("bytes cannot be nil")
}
headers, err := f.modifyRange(bytes, fileRange, contentMD5)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
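Putting Create and WriteRange together, a small upload is one fixed-size create followed by a single range write. A sketch for payloads between 1 byte and the 4MB range limit enforced above:

func uploadSmall(f *File, data io.Reader, size uint64) error {
	if err := f.Create(size); err != nil {
		return err
	}
	// size is assumed to be in (0, 4MB], so one range write covers the file.
	r := FileRange{Start: 0, End: size - 1}
	return f.WriteRange(data, r, nil) // nil skips the optional MD5 check
}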

View File

@ -1,375 +0,0 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
client Client
auth authentication
}
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ListSharesParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ShareListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Shares []Share `xml:"Shares>Share"`
}
type compType string
const (
compNone compType = ""
compList compType = "list"
compMetadata compType = "metadata"
compProperties compType = "properties"
compRangeList compType = "rangelist"
)
func (ct compType) String() string {
return string(ct)
}
type resourceType string
const (
resourceDirectory resourceType = "directory"
resourceFile resourceType = ""
resourceShare resourceType = "share"
)
func (rt resourceType) String() string {
return string(rt)
}
func (p ListSharesParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
func (p ListDirsAndFilesParameters) getParameters() url.Values {
out := url.Values{}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// returns url.Values for the specified types
func getURLInitValues(comp compType, res resourceType) url.Values {
values := url.Values{}
if comp != compNone {
values.Set("comp", comp.String())
}
if res != resourceFile {
values.Set("restype", res.String())
}
return values
}
// GetShareReference returns a Share object for the specified share name.
func (f FileServiceClient) GetShareReference(name string) Share {
return Share{
fsc: &f,
Name: name,
Properties: ShareProperties{
Quota: -1,
},
}
}
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
var out ShareListResponse
resp, err := f.listContent("", q, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out)
// assign our client to the newly created Share objects
for i := range out.Shares {
out.Shares[i].fsc = &f
}
return &out, err
}
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return f.client.getServiceProperties(fileServiceName, f.auth)
}
// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
// retrieves directory or share content
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, path, params)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
if err != nil {
return nil, err
}
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
readAndCloseBody(resp.body)
return nil, err
}
return resp, nil
}
// returns true if the specified resource exists
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
if err := f.checkForStorageEmulator(); err != nil {
return false, nil, err
}
uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
headers := f.client.getStandardHeaders()
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, resp.headers, nil
}
}
return false, nil, err
}
// creates a resource depending on the specified resource type
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
if err != nil {
return nil, err
}
defer readAndCloseBody(resp.body)
return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
}
// creates a resource depending on the specified resource type, doesn't close the response body
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
values := getURLInitValues(compNone, res)
combinedParams := mergeParams(values, urlParams)
uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}
// returns HTTP header data for the specified directory or share
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
if err != nil {
return nil, err
}
defer readAndCloseBody(resp.body)
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
return resp.headers, nil
}
// gets the specified resource, doesn't close the response body
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
params := getURLInitValues(comp, res)
uri := f.client.getEndpoint(fileServiceName, path, params)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
return f.client.exec(verb, uri, headers, nil, f.auth)
}
// deletes the resource and returns the response
func (f FileServiceClient) deleteResource(path string, res resourceType) error {
resp, err := f.deleteResourceNoClose(path, res)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
// deletes the resource and returns the response, doesn't close the response body
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
values := getURLInitValues(compNone, res)
uri := f.client.getEndpoint(fileServiceName, path, values)
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}
// merges metadata into extraHeaders and returns extraHeaders
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
if metadata == nil && extraHeaders == nil {
return nil
}
if extraHeaders == nil {
extraHeaders = make(map[string]string)
}
for k, v := range metadata {
extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
}
return extraHeaders
}
// merges extraHeaders into headers and returns headers
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
for k, v := range extraHeaders {
headers[k] = v
}
return headers
}
// sets extra header data for the specified resource
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
params := getURLInitValues(comp, res)
uri := f.client.getEndpoint(fileServiceName, path, params)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
if err != nil {
return nil, err
}
defer readAndCloseBody(resp.body)
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// gets metadata for the specified resource
func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
if err != nil {
return nil, err
}
return getMetadataFromHeaders(headers), nil
}
// returns a map of custom metadata values from the specified HTTP header
func getMetadataFromHeaders(header http.Header) map[string]string {
metadata := make(map[string]string)
for k, v := range header {
// Can't trust CanonicalHeaderKey() to munge case
// reliably. "_" is allowed in identifiers:
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
// http://tools.ietf.org/html/rfc7230#section-3.2
// ...but "_" is considered invalid by
// CanonicalMIMEHeaderKey in
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
k = strings.ToLower(k)
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
continue
}
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
k = k[len(userDefinedMetadataHeaderPrefix):]
metadata[k] = v[len(v)-1]
}
if len(metadata) == 0 {
return nil
}
return metadata
}
// checkForStorageEmulator determines if the client is set up for use with the
// Azure Storage Emulator, and returns a relevant error if so.
func (f FileServiceClient) checkForStorageEmulator() error {
if f.client.accountName == StorageEmulatorAccountName {
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
}
return nil
}
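From a caller's perspective, ListShares follows the same shape as the other listing calls. A sketch that gathers the first page of share names (a production version would also follow NextMarker, as elsewhere); the FileServiceClient is assumed to come from Client.GetFileService:

func shareNames(fsc FileServiceClient) ([]string, error) {
	resp, err := fsc.ListShares(ListSharesParameters{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(resp.Shares))
	for _, s := range resp.Shares {
		names = append(names, s.Name)
	}
	return names, nil
}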

View File

@ -1,14 +0,0 @@
hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e
updated: 2017-02-23T09:58:57.3701584-08:00
imports:
- name: github.com/Azure/go-autorest
version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
subpackages:
- autorest/azure
- autorest
- autorest/date
- name: github.com/dgrijalva/jwt-go
version: 2268707a8f0843315e2004ee4f1d021dc08baedf
testImports:
- name: gopkg.in/check.v1
version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec

View File

@ -1,4 +0,0 @@
package: github.com/Azure/azure-sdk-for-go-storage
import: []
testImport:
- package: gopkg.in/check.v1

View File

@ -1,339 +0,0 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
const (
// casing is per Golang's http.Header canonicalizing the header names.
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
)
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
type putMessageRequest struct {
XMLName xml.Name `xml:"QueueMessage"`
MessageText string `xml:"MessageText"`
}
// PutMessageParameters is the set of options that can be specified for the
// Put Message operation. A zero struct does not use any preferences for the request.
type PutMessageParameters struct {
VisibilityTimeout int
MessageTTL int
}
func (p PutMessageParameters) getParameters() url.Values {
out := url.Values{}
if p.VisibilityTimeout != 0 {
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
}
if p.MessageTTL != 0 {
out.Set("messagettl", strconv.Itoa(p.MessageTTL))
}
return out
}
// GetMessagesParameters is the set of options that can be specified for the
// Get Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesParameters struct {
NumOfMessages int
VisibilityTimeout int
}
func (p GetMessagesParameters) getParameters() url.Values {
out := url.Values{}
if p.NumOfMessages != 0 {
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
}
if p.VisibilityTimeout != 0 {
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
}
return out
}
// PeekMessagesParameters is the set of options that can be specified for the
// Peek Messages operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesParameters struct {
NumOfMessages int
}
func (p PeekMessagesParameters) getParameters() url.Values {
out := url.Values{"peekonly": {"true"}} // Required for peek operation
if p.NumOfMessages != 0 {
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
}
return out
}
// UpdateMessageParameters is the set of options that can be specified for the
// Update Message operation. A zero struct does not use any preferences for the request.
type UpdateMessageParameters struct {
PopReceipt string
VisibilityTimeout int
}
func (p UpdateMessageParameters) getParameters() url.Values {
out := url.Values{}
if p.PopReceipt != "" {
out.Set("popreceipt", p.PopReceipt)
}
if p.VisibilityTimeout != 0 {
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
}
return out
}
// GetMessagesResponse represents a response returned from the Get Messages
// operation.
type GetMessagesResponse struct {
XMLName xml.Name `xml:"QueueMessagesList"`
QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}
// GetMessageResponse represents a QueueMessage object returned in the Get
// Messages operation response.
type GetMessageResponse struct {
MessageID string `xml:"MessageId"`
InsertionTime string `xml:"InsertionTime"`
ExpirationTime string `xml:"ExpirationTime"`
PopReceipt string `xml:"PopReceipt"`
TimeNextVisible string `xml:"TimeNextVisible"`
DequeueCount int `xml:"DequeueCount"`
MessageText string `xml:"MessageText"`
}
// PeekMessagesResponse represents a response returned from the Peek Messages
// operation.
type PeekMessagesResponse struct {
XMLName xml.Name `xml:"QueueMessagesList"`
QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}
// PeekMessageResponse represents a QueueMessage object returned in the Peek
// Messages operation response.
type PeekMessageResponse struct {
MessageID string `xml:"MessageId"`
InsertionTime string `xml:"InsertionTime"`
ExpirationTime string `xml:"ExpirationTime"`
DequeueCount int `xml:"DequeueCount"`
MessageText string `xml:"MessageText"`
}
// QueueMetadataResponse represents user defined metadata and queue
// properties on a specific queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
type QueueMetadataResponse struct {
ApproximateMessageCount int
UserDefinedMetadata map[string]string
}
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
metadata = c.client.protectUserAgent(metadata)
headers := c.client.getStandardHeaders()
for k, v := range metadata {
headers[userDefinedMetadataHeaderPrefix+k] = v
}
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
//
// Because of the way Golang's http client (and http.Header in particular)
// canonicalizes header names, the returned metadata names will always be
// all lower case.
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
qm := QueueMetadataResponse{}
qm.UserDefinedMetadata = make(map[string]string)
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
headers := c.client.getStandardHeaders()
resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
if err != nil {
return qm, err
}
defer readAndCloseBody(resp.body)
for k, v := range resp.headers {
if len(v) != 1 {
return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
}
value := v[0]
if k == approximateMessagesCountHeader {
qm.ApproximateMessageCount, err = strconv.Atoi(value)
if err != nil {
return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
}
} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
qm.UserDefinedMetadata[strings.ToLower(name)] = value
}
}
return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
}
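// Example (illustrative sketch, not part of the original file): setting and
// reading back queue metadata. NewBasicClient and the GetQueueService
// accessor are assumed to be provided elsewhere in this package.
//
//	cli, err := NewBasicClient("myaccount", "mykey")
//	if err != nil {
//		log.Fatal(err)
//	}
//	qs := cli.GetQueueService()
//	if err := qs.SetMetadata("jobs", map[string]string{"owner": "build"}); err != nil {
//		log.Fatal(err)
//	}
//	meta, err := qs.GetMetadata("jobs")
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Metadata keys come back lower-cased, per the note above.
//	fmt.Println(meta.ApproximateMessageCount, meta.UserDefinedMetadata["owner"])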
// CreateQueue operation creates a queue under the given account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
func (c QueueServiceClient) CreateQueue(name string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
headers := c.client.getStandardHeaders()
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// DeleteQueue operation permanently deletes the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
func (c QueueServiceClient) DeleteQueue(name string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// QueueExists returns true if a queue with the given name exists.
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
return resp.statusCode == http.StatusOK, nil
}
return false, err
}
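// Example (illustrative sketch): idempotent queue setup with QueueExists and
// CreateQueue; qs is the QueueServiceClient from the metadata example above.
//
//	exists, err := qs.QueueExists("jobs")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if !exists {
//		if err := qs.CreateQueue("jobs"); err != nil {
//			log.Fatal(err)
//		}
//	}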
// PutMessage operation adds a new message to the back of the message queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
req := putMessageRequest{MessageText: message}
body, nn, err := xmlMarshal(req)
if err != nil {
return err
}
headers := c.client.getStandardHeaders()
headers["Content-Length"] = strconv.Itoa(nn)
resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
func (c QueueServiceClient) ClearMessages(queue string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
var r GetMessagesResponse
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
if err != nil {
return r, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &r)
return r, err
}
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
var r PeekMessagesResponse
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
if err != nil {
return r, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &r)
return r, err
}
// DeleteMessage operation deletes the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
"popreceipt": {popReceipt}})
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
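// Example (illustrative sketch): the usual produce/consume cycle. A message is
// enqueued, retrieved together with its pop receipt, processed and deleted.
// Zero-valued parameter structs are assumed to select the service defaults.
//
//	if err := qs.PutMessage("jobs", "hello", PutMessageParameters{}); err != nil {
//		log.Fatal(err)
//	}
//	resp, err := qs.GetMessages("jobs", GetMessagesParameters{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, msg := range resp.QueueMessagesList {
//		// process msg.MessageText, then remove the message from the queue
//		if err := qs.DeleteMessage("jobs", msg.MessageID, msg.PopReceipt); err != nil {
//			log.Fatal(err)
//		}
//	}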
// UpdateMessage operation updates the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
req := putMessageRequest{MessageText: message}
body, nn, err := xmlMarshal(req)
if err != nil {
return err
}
headers := c.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%d", nn)
resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}

View File

@ -1,20 +0,0 @@
package storage
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
client Client
auth authentication
}
// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return c.client.getServiceProperties(queueServiceName, c.auth)
}
// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
return c.client.setServiceProperties(props, queueServiceName, c.auth)
}

View File

@ -1,186 +0,0 @@
package storage
import (
"fmt"
"net/http"
"net/url"
"strconv"
)
// Share represents an Azure file share.
type Share struct {
fsc *FileServiceClient
Name string `xml:"Name"`
Properties ShareProperties `xml:"Properties"`
Metadata map[string]string
}
// ShareProperties contains various properties of a share.
type ShareProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
Quota int `xml:"Quota"`
}
// builds the complete path for this share object.
func (s *Share) buildPath() string {
return fmt.Sprintf("/%s", s.Name)
}
// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (s *Share) Create() error {
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (s *Share) CreateIfNotExists() (bool, error) {
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
if resp.statusCode == http.StatusCreated {
s.updateEtagAndLastModified(resp.headers)
return true, nil
}
return false, s.FetchAttributes()
}
}
return false, err
}
// Delete marks this share for deletion. The share, along with any files
// and directories contained within it, is later deleted during garbage
// collection. If the share does not exist, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (s *Share) Delete() error {
return s.fsc.deleteResource(s.buildPath(), resourceShare)
}
// DeleteIfExists operation marks this share for deletion if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (s *Share) DeleteIfExists() (bool, error) {
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
if resp != nil {
defer readAndCloseBody(resp.body)
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
return false, err
}
// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
func (s *Share) Exists() (bool, error) {
exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
if exists {
s.updateEtagAndLastModified(headers)
s.updateQuota(headers)
}
return exists, err
}
// FetchAttributes retrieves metadata and properties for this share.
func (s *Share) FetchAttributes() error {
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
s.updateQuota(headers)
s.Metadata = getMetadataFromHeaders(headers)
return nil
}
// GetRootDirectoryReference returns a Directory object at the root of this share.
func (s *Share) GetRootDirectoryReference() *Directory {
return &Directory{
fsc: s.fsc,
share: s,
}
}
// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
return s.fsc
}
// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (s *Share) SetMetadata() error {
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case when read back. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
func (s *Share) SetProperties() error {
if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
}
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
"x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
})
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (s *Share) updateEtagAndLastModified(headers http.Header) {
s.Properties.Etag = headers.Get("Etag")
s.Properties.LastModified = headers.Get("Last-Modified")
}
// updates quota value
func (s *Share) updateQuota(headers http.Header) {
quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
if err == nil {
s.Properties.Quota = quota
}
}
// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private, nor does it check whether the share exists.
func (s *Share) URL() string {
return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
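// Example (illustrative sketch): a typical share round-trip. fsc is assumed to
// be a *FileServiceClient, and GetShareReference is assumed to hand out a
// *Share wired to it.
//
//	share := fsc.GetShareReference("backups")
//	created, err := share.CreateIfNotExists()
//	if err != nil {
//		log.Fatal(err)
//	}
//	if created {
//		share.Properties.Quota = 10 // GiB, must lie within [1, 5120]
//		if err := share.SetProperties(); err != nil {
//			log.Fatal(err)
//		}
//	}
//	fmt.Println(share.URL())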

View File

@ -1,47 +0,0 @@
package storage
import (
"strings"
"time"
)
// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
StartTime time.Time `xml:"Start"`
ExpiryTime time.Time `xml:"Expiry"`
Permission string `xml:"Permission"`
}
// SignedIdentifier is a wrapper for a specific policy
type SignedIdentifier struct {
ID string `xml:"Id"`
AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}
// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}
// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}
// convertAccessPolicyToXMLStructs converts the access policy parameters, which
// are better suited for API usage, into the SignedIdentifier struct that is marshaled to XML.
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
return SignedIdentifier{
ID: id,
AccessPolicy: AccessPolicyDetailsXML{
StartTime: startTime.UTC().Round(time.Second),
ExpiryTime: expiryTime.UTC().Round(time.Second),
Permission: permissions,
},
}
}
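// Example (illustrative, derived from the XML tags above): a single read-only
// policy marshals roughly as
//
//	<SignedIdentifier>
//	  <Id>policy-1</Id>
//	  <AccessPolicy>
//	    <Start>2018-07-25T00:00:00Z</Start>
//	    <Expiry>2018-08-01T00:00:00Z</Expiry>
//	    <Permission>r</Permission>
//	  </AccessPolicy>
//	</SignedIdentifier>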
// updatePermissions reports whether the permissions string contains the given
// permission flag.
func updatePermissions(permissions, permission string) bool {
return strings.Contains(permissions, permission)
}

View File

@ -1,118 +0,0 @@
package storage
import (
"fmt"
"net/http"
"net/url"
)
// ServiceProperties represents the storage account service properties
type ServiceProperties struct {
Logging *Logging
HourMetrics *Metrics
MinuteMetrics *Metrics
Cors *Cors
}
// Logging represents the Azure Analytics Logging settings
type Logging struct {
Version string
Delete bool
Read bool
Write bool
RetentionPolicy *RetentionPolicy
}
// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
Enabled bool
Days *int
}
// Metrics provide request statistics.
type Metrics struct {
Version string
Enabled bool
IncludeAPIs *bool
RetentionPolicy *RetentionPolicy
}
// Cors includes all the CORS rules
type Cors struct {
CorsRule []CorsRule
}
// CorsRule includes all settings for a Cors rule
type CorsRule struct {
AllowedOrigins string
AllowedMethods string
MaxAgeInSeconds int
ExposedHeaders string
AllowedHeaders string
}
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
headers := c.getStandardHeaders()
resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
var out ServiceProperties
err = xmlUnmarshal(resp.body, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
// Ideally, StorageServiceProperties would be the output struct, but it is
// declared locally to avoid golint stuttering while still generating the correct XML
type StorageServiceProperties struct {
Logging *Logging
HourMetrics *Metrics
MinuteMetrics *Metrics
Cors *Cors
}
input := StorageServiceProperties{
Logging: props.Logging,
HourMetrics: props.HourMetrics,
MinuteMetrics: props.MinuteMetrics,
Cors: props.Cors,
}
body, length, err := xmlMarshal(input)
if err != nil {
return err
}
headers := c.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", length)
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
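// Example (illustrative sketch): enabling read logging with a 7-day retention
// policy through the exported service-client wrappers; qs is assumed to be a
// *QueueServiceClient, and the other services expose the same pair of calls.
//
//	props, err := qs.GetServiceProperties()
//	if err != nil {
//		log.Fatal(err)
//	}
//	days := 7
//	props.Logging = &Logging{
//		Version:         "1.0",
//		Read:            true,
//		RetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days},
//	}
//	if err := qs.SetServiceProperties(*props); err != nil {
//		log.Fatal(err)
//	}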

View File

@ -1,254 +0,0 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
// AzureTable is the type for an Azure Table name
type AzureTable string
const (
tablesURIPath = "/Tables"
)
type createTableRequest struct {
TableName string `json:"TableName"`
}
// TableAccessPolicy is used for setting table policies
type TableAccessPolicy struct {
ID string
StartTime time.Time
ExpiryTime time.Time
CanRead bool
CanAppend bool
CanUpdate bool
CanDelete bool
}
func pathForTable(table AzureTable) string { return string(table) }
func (c *TableServiceClient) getStandardHeaders() map[string]string {
return map[string]string{
"x-ms-version": "2015-02-21",
"x-ms-date": currentTimeRfc1123Formatted(),
"Accept": "application/json;odata=nometadata",
"Accept-Charset": "UTF-8",
"Content-Type": "application/json",
userAgentHeader: c.client.userAgent,
}
}
// QueryTables returns the tables created in the
// *TableServiceClient storage account.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
ioutil.ReadAll(resp.body)
return nil, err
}
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.body); err != nil {
return nil, err
}
var respArray queryTablesResponse
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
return nil, err
}
s := make([]AzureTable, len(respArray.TableName))
for i, elem := range respArray.TableName {
s[i] = AzureTable(elem.TableName)
}
return s, nil
}
// CreateTable creates the table with the given
// name. This function fails if the name is not compliant
// with the specification or the table already exists.
func (c *TableServiceClient) CreateTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
req := createTableRequest{TableName: string(table)}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(req); err != nil {
return err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
return err
}
return nil
}
// DeleteTable deletes the table with the given
// name. This function fails if the table is not present.
// Be advised: DeleteTable deletes all the entries
// that may be present.
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
uri += fmt.Sprintf("('%s')", string(table))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
params := url.Values{"comp": {"acl"}}
if timeout > 0 {
params.Add("timeout", fmt.Sprint(timeout))
}
uri := c.client.getEndpoint(tableServiceName, string(table), params)
headers := c.client.getStandardHeaders()
body, length, err := generateTableACLPayload(policies)
if err != nil {
return err
}
headers["Content-Length"] = fmt.Sprintf("%v", length)
resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
if err != nil {
return err
}
defer readAndCloseBody(resp.body)
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
sil := SignedIdentifiers{
SignedIdentifiers: []SignedIdentifier{},
}
for _, tap := range policies {
permission := generateTablePermissions(&tap)
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
}
return xmlMarshal(sil)
}
// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
params := url.Values{"comp": {"acl"}}
if timeout > 0 {
params.Add("timeout", strconv.Itoa(timeout))
}
uri := c.client.getEndpoint(tableServiceName, string(table), params)
headers := c.client.getStandardHeaders()
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
ioutil.ReadAll(resp.body)
return nil, err
}
var ap AccessPolicy
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
if err != nil {
return nil, err
}
out := updateTableAccessPolicy(ap)
return out, nil
}
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
out := []TableAccessPolicy{}
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
tap := TableAccessPolicy{
ID: policy.ID,
StartTime: policy.AccessPolicy.StartTime,
ExpiryTime: policy.AccessPolicy.ExpiryTime,
}
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
out = append(out, tap)
}
return out
}
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
// generate the permissions string (raud).
// still want the end user API to have bool flags.
permissions = ""
if tap.CanRead {
permissions += "r"
}
if tap.CanAppend {
permissions += "a"
}
if tap.CanUpdate {
permissions += "u"
}
if tap.CanDelete {
permissions += "d"
}
return permissions
}
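// Example (illustrative sketch): granting read/append access to a table for one
// week via SetTablePermissions; ts is assumed to be a *TableServiceClient.
//
//	policy := TableAccessPolicy{
//		ID:         "ra-policy",
//		StartTime:  time.Now().UTC(),
//		ExpiryTime: time.Now().UTC().Add(7 * 24 * time.Hour),
//		CanRead:    true,
//		CanAppend:  true,
//	}
//	if err := ts.SetTablePermissions("mytable", []TableAccessPolicy{policy}, 30); err != nil {
//		log.Fatal(err)
//	}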

View File

@ -1,354 +0,0 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
)
// Annotating as secure for gas scanning
/* #nosec */
const (
partitionKeyNode = "PartitionKey"
rowKeyNode = "RowKey"
tag = "table"
tagIgnore = "-"
continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey"
maxTopParameter = 1000
)
type queryTablesResponse struct {
TableName []struct {
TableName string `json:"TableName"`
} `json:"value"`
}
const (
tableOperationTypeInsert = iota
tableOperationTypeUpdate = iota
tableOperationTypeMerge = iota
tableOperationTypeInsertOrReplace = iota
tableOperationTypeInsertOrMerge = iota
)
type tableOperation int
// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
PartitionKey() string
RowKey() string
SetPartitionKey(string) error
SetRowKey(string) error
}
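// Example (illustrative sketch): a minimal TableEntity implementation. The key
// fields carry `json:"-"` so the JSON encoder skips them; the keys are instead
// injected and stripped by the helpers further down in this file.
//
//	type Job struct {
//		PKey    string `json:"-" table:"-"`
//		RKey    string `json:"-" table:"-"`
//		Payload string `json:"payload"`
//	}
//
//	func (j *Job) PartitionKey() string           { return j.PKey }
//	func (j *Job) RowKey() string                 { return j.RKey }
//	func (j *Job) SetPartitionKey(s string) error { j.PKey = s; return nil }
//	func (j *Job) SetRowKey(s string) error       { j.RKey = s; return nil }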
// ContinuationToken is an opaque (i.e., not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
NextPartitionKey string
NextRowKey string
}
type getTableEntriesResponse struct {
Elements []map[string]interface{} `json:"value"`
}
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// The top parameter limits the number of returned entries; the maximum top
// allowed by the Azure API is 1000. In case there are more than top entries to
// be returned, the function returns a non-nil *ContinuationToken. You can call
// the same function again, passing the received ContinuationToken as the
// previousContToken parameter, in order to get the following entries. The
// query parameter is the OData query. To retrieve all the entries pass the
// empty string. The function returns a TableEntity slice, a
// *ContinuationToken if there are more entries to be returned, and an error in
// case something went wrong.
//
// Example:
// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
if top > maxTopParameter {
return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
}
uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
uri += fmt.Sprintf("?$top=%d", top)
if query != "" {
uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
}
if previousContToken != nil {
uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
}
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
if err != nil {
return nil, nil, err
}
contToken := extractContinuationTokenFromHeaders(resp.headers)
defer resp.body.Close()
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, contToken, err
}
retEntries, err := deserializeEntity(retType, resp.body)
if err != nil {
return nil, contToken, err
}
return retEntries, contToken, nil
}
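// Example (illustrative sketch): draining a table page by page with the
// continuation token; Job is the hypothetical TableEntity sketched above.
//
//	var (
//		token *ContinuationToken
//		all   []TableEntity
//	)
//	for {
//		page, next, err := ts.QueryTableEntities("jobs", token, reflect.TypeOf(&Job{}), 1000, "")
//		if err != nil {
//			log.Fatal(err)
//		}
//		all = append(all, page...)
//		if next == nil {
//			break
//		}
//		token = next
//	}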
// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, false, http.MethodPost)
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusCreated})
}
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
if specifyKeysInURL {
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
}
headers := c.getStandardHeaders()
var buf bytes.Buffer
if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
return 0, err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)
if err != nil {
return 0, err
}
defer resp.body.Close()
return resp.statusCode, nil
}
// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, http.MethodPut)
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "MERGE")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on the IfMatch
// parameter, so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
return c.DeleteEntity(table, entity, "*")
}
// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch value is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
headers["If-Match"] = ifMatch
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
// InsertOrReplaceEntity inserts an entity in the specified table
// or replaces the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, http.MethodPut)
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "MERGE")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
if err := json.NewEncoder(buf).Encode(entity); err != nil {
return err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return err
}
// Inject PartitionKey and RowKey
dec[partitionKeyNode] = entity.PartitionKey()
dec[rowKeyNode] = entity.RowKey()
// Remove tagged fields
// The tag is defined in the const section
// This is useful to avoid storing the PartitionKey and RowKey twice.
numFields := reflect.ValueOf(entity).Elem().NumField()
for i := 0; i < numFields; i++ {
f := reflect.ValueOf(entity).Elem().Type().Field(i)
if f.Tag.Get(tag) == tagIgnore {
// we must look for its JSON name in the dictionary
// as the user can rename it using a tag
jsonName := f.Name
if f.Tag.Get("json") != "" {
jsonName = f.Tag.Get("json")
}
delete(dec, jsonName)
}
}
buf.Reset()
if err := json.NewEncoder(buf).Encode(&dec); err != nil {
return err
}
return nil
}
func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
buf := new(bytes.Buffer)
var ret getTableEntriesResponse
if err := json.NewDecoder(reader).Decode(&ret); err != nil {
return nil, err
}
tEntries := make([]TableEntity, len(ret.Elements))
for i, entry := range ret.Elements {
buf.Reset()
if err := json.NewEncoder(buf).Encode(entry); err != nil {
return nil, err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return nil, err
}
var pKey, rKey string
// strip pk and rk
for key, val := range dec {
switch key {
case partitionKeyNode:
pKey = val.(string)
case rowKeyNode:
rKey = val.(string)
}
}
delete(dec, partitionKeyNode)
delete(dec, rowKeyNode)
buf.Reset()
if err := json.NewEncoder(buf).Encode(dec); err != nil {
return nil, err
}
// Create an empty retType instance
tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
// Populate it with the values
if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
return nil, err
}
// Reset PartitionKey and RowKey
if err := tEntries[i].SetPartitionKey(pKey); err != nil {
return nil, err
}
if err := tEntries[i].SetRowKey(rKey); err != nil {
return nil, err
}
}
return tEntries, nil
}
func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
return &ct
}
return nil
}

View File

@ -1,20 +0,0 @@
package storage
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
client Client
auth authentication
}
// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return c.client.getServiceProperties(tableServiceName, c.auth)
}
// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
return c.client.setServiceProperties(props, tableServiceName, c.auth)
}

View File

@ -1,85 +0,0 @@
package storage
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"time"
)
func (c Client) computeHmac256(message string) string {
h := hmac.New(sha256.New, c.accountKey)
h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func currentTimeRfc1123Formatted() string {
return timeRfc1123Formatted(time.Now().UTC())
}
func timeRfc1123Formatted(t time.Time) string {
return t.Format(http.TimeFormat)
}
func mergeParams(v1, v2 url.Values) url.Values {
out := url.Values{}
for k, v := range v1 {
out[k] = v
}
for k, v := range v2 {
vals, ok := out[k]
if ok {
vals = append(vals, v...)
out[k] = vals
} else {
out[k] = v
}
}
return out
}
func prepareBlockListRequest(blocks []Block) string {
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
for _, v := range blocks {
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
}
s += `</BlockList>`
return s
}
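// Example (illustrative): for two committed blocks with IDs "AAA=" and "BBB=",
// and assuming Block.Status stringifies to "Committed", this yields
//
//	<?xml version="1.0" encoding="utf-8"?><BlockList><Committed>AAA=</Committed><Committed>BBB=</Committed></BlockList>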
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
func xmlMarshal(v interface{}) (io.Reader, int, error) {
b, err := xml.Marshal(v)
if err != nil {
return nil, 0, err
}
return bytes.NewReader(b), len(b), nil
}
func headersFromStruct(v interface{}) map[string]string {
headers := make(map[string]string)
value := reflect.ValueOf(v)
for i := 0; i < value.NumField(); i++ {
key := value.Type().Field(i).Tag.Get("header")
val := value.Field(i).String()
if key != "" && val != "" {
headers[key] = val
}
}
return headers
}
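// Example (illustrative sketch): headersFromStruct collects `header` tags off a
// flat struct of strings, so a hypothetical options type such as
//
//	type copyOptions struct {
//		Timeout string `header:"x-ms-timeout"`
//		LeaseID string `header:"x-ms-lease-id"`
//	}
//
// with only LeaseID set produces map[string]string{"x-ms-lease-id": "..."},
// since empty values are skipped.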

View File

@ -1,5 +0,0 @@
package storage
var (
sdkVersion = "0.1.0"
)

View File

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,115 +0,0 @@
/*
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
generated Go code.
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:
req, err := Prepare(&http.Request{},
token.WithAuthorization())
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
will set the URL to:
https://microsoft.com/a/b/c
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
all bound together by means of input / output channels.
Decorators hold their passed state within a closure (such as the path components in the example
above). Be careful to share Preparers and Responders only in a context where such held state
applies. For example, it may not make sense to share a Preparer that applies a query string from a
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
struct (e.g., ByUnmarshallingJSON) is likely incorrect.
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
correct parsing and formatting.
Errors raised by autorest objects and methods will conform to the autorest.Error interface.
See the included examples for more detail. For details on the suggested use of this package by
generated clients, see the Client described below.
*/
package autorest
import (
"net/http"
"time"
)
const (
// HeaderLocation specifies the HTTP Location header.
HeaderLocation = "Location"
// HeaderRetryAfter specifies the HTTP Retry-After header.
HeaderRetryAfter = "Retry-After"
)
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
// and false otherwise.
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
return containsInt(codes, resp.StatusCode)
}
// GetLocation retrieves the URL from the Location header of the passed response.
func GetLocation(resp *http.Response) string {
return resp.Header.Get(HeaderLocation)
}
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
retry := resp.Header.Get(HeaderRetryAfter)
if retry == "" {
return defaultDelay
}
d, err := time.ParseDuration(retry + "s")
if err != nil {
return defaultDelay
}
return d
}
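// Example (illustrative sketch): honoring the server's backoff hint between
// manual retries, falling back to one second when the header is absent or
// malformed. Note the header value is interpreted as a number of seconds.
//
//	resp, err := http.DefaultClient.Do(req)
//	if err != nil {
//		return err
//	}
//	if resp.StatusCode == http.StatusTooManyRequests {
//		time.Sleep(GetRetryAfter(resp, time.Second))
//		// ...retry the request...
//	}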
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
location := GetLocation(resp)
if location == "" {
return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
}
req, err := Prepare(&http.Request{Cancel: cancel},
AsGet(),
WithBaseURL(location))
if err != nil {
return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
}
return req, nil
}

View File

@ -1,308 +0,0 @@
package azure
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/date"
)
const (
headerAsyncOperation = "Azure-AsyncOperation"
)
const (
methodDelete = "DELETE"
methodPatch = "PATCH"
methodPost = "POST"
methodPut = "PUT"
methodGet = "GET"
operationInProgress string = "InProgress"
operationCanceled string = "Canceled"
operationFailed string = "Failed"
operationSucceeded string = "Succeeded"
)
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
// long-running operation. It will delay between requests for the duration specified in the
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
// closing the optional channel on the http.Request.
func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
resp, err = s.Do(r)
if err != nil {
return resp, err
}
pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
return resp, nil
}
ps := pollingState{}
for err == nil {
err = updatePollingState(resp, &ps)
if err != nil {
break
}
if ps.hasTerminated() {
if !ps.hasSucceeded() {
err = ps
}
break
}
r, err = newPollingRequest(resp, ps)
if err != nil {
return resp, err
}
delay = autorest.GetRetryAfter(resp, delay)
resp, err = autorest.SendWithSender(s, r,
autorest.AfterDelay(delay))
}
return resp, err
})
}
}
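// Example (illustrative sketch): wiring the poller into a send pipeline so that
// a 200/201/202 long-running response is followed to completion, waiting 30
// seconds between polls unless the service requests otherwise; http.Client is
// assumed to satisfy autorest.Sender.
//
//	resp, err := autorest.SendWithSender(http.DefaultClient, req,
//		DoPollForAsynchronous(30*time.Second))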
func getAsyncOperation(resp *http.Response) string {
return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
}
func hasSucceeded(state string) bool {
return state == operationSucceeded
}
func hasTerminated(state string) bool {
switch state {
case operationCanceled, operationFailed, operationSucceeded:
return true
default:
return false
}
}
func hasFailed(state string) bool {
return state == operationFailed
}
type provisioningTracker interface {
state() string
hasSucceeded() bool
hasTerminated() bool
}
type operationResource struct {
// Note:
// The specification states services should return the "id" field. However,
// some return it as "operationId".
ID string `json:"id"`
OperationID string `json:"operationId"`
Name string `json:"name"`
Status string `json:"status"`
Properties map[string]interface{} `json:"properties"`
OperationError ServiceError `json:"error"`
StartTime date.Time `json:"startTime"`
EndTime date.Time `json:"endTime"`
PercentComplete float64 `json:"percentComplete"`
}
func (or operationResource) state() string {
return or.Status
}
func (or operationResource) hasSucceeded() bool {
return hasSucceeded(or.state())
}
func (or operationResource) hasTerminated() bool {
return hasTerminated(or.state())
}
type provisioningProperties struct {
ProvisioningState string `json:"provisioningState"`
}
type provisioningStatus struct {
Properties provisioningProperties `json:"properties,omitempty"`
ProvisioningError ServiceError `json:"error,omitempty"`
}
func (ps provisioningStatus) state() string {
return ps.Properties.ProvisioningState
}
func (ps provisioningStatus) hasSucceeded() bool {
return hasSucceeded(ps.state())
}
func (ps provisioningStatus) hasTerminated() bool {
return hasTerminated(ps.state())
}
func (ps provisioningStatus) hasProvisioningError() bool {
return ps.ProvisioningError != ServiceError{}
}
type pollingResponseFormat string
const (
usesOperationResponse pollingResponseFormat = "OperationResponse"
usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
formatIsUnknown pollingResponseFormat = ""
)
type pollingState struct {
responseFormat pollingResponseFormat
uri string
state string
code string
message string
}
func (ps pollingState) hasSucceeded() bool {
return hasSucceeded(ps.state)
}
func (ps pollingState) hasTerminated() bool {
return hasTerminated(ps.state)
}
func (ps pollingState) hasFailed() bool {
return hasFailed(ps.state)
}
func (ps pollingState) Error() string {
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
}
// updatePollingState maps the operation status -- retrieved from either a provisioningState
// field, the status field of an OperationResource, or inferred from the HTTP status code --
// into a well-known state. Since the process begins from the initial request, the state
// always either comes from the provisioningState returned or is inferred from the HTTP
// status code. Subsequent requests will read an Azure OperationResource object if the
// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
// the expected response format.
func updatePollingState(resp *http.Response, ps *pollingState) error {
// Determine the response shape
// -- The first response will always be a provisioningStatus response; only the polling requests,
// depending on the header returned, may be something otherwise.
var pt provisioningTracker
if ps.responseFormat == usesOperationResponse {
pt = &operationResource{}
} else {
pt = &provisioningStatus{}
}
// If this is the first request (that is, the polling response shape is unknown), determine how
// to poll and what to expect
if ps.responseFormat == formatIsUnknown {
req := resp.Request
if req == nil {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
}
// Prefer the Azure-AsyncOperation header
ps.uri = getAsyncOperation(resp)
if ps.uri != "" {
ps.responseFormat = usesOperationResponse
} else {
ps.responseFormat = usesProvisioningStatus
}
// Else, use the Location header
if ps.uri == "" {
ps.uri = autorest.GetLocation(resp)
}
// Lastly, requests against an existing resource, use the last request URI
if ps.uri == "" {
m := strings.ToUpper(req.Method)
if m == methodPatch || m == methodPut || m == methodGet {
ps.uri = req.URL.String()
}
}
}
// Read and interpret the response (saving the Body in case no polling is necessary)
b := &bytes.Buffer{}
err := autorest.Respond(resp,
autorest.ByCopying(b),
autorest.ByUnmarshallingJSON(pt),
autorest.ByClosing())
resp.Body = ioutil.NopCloser(b)
if err != nil {
return err
}
// Interpret the results
// -- Terminal states apply regardless
// -- Unknown states are per-service in-progress states
// -- Otherwise, infer state from HTTP status code
if pt.hasTerminated() {
ps.state = pt.state()
} else if pt.state() != "" {
ps.state = operationInProgress
} else {
switch resp.StatusCode {
case http.StatusAccepted:
ps.state = operationInProgress
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
ps.state = operationSucceeded
default:
ps.state = operationFailed
}
}
if ps.state == operationInProgress && ps.uri == "" {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
}
// For failed operation, check for error code and message in
// -- Operation resource
// -- Response
// -- Otherwise, Unknown
if ps.hasFailed() {
if ps.responseFormat == usesOperationResponse {
or := pt.(*operationResource)
ps.code = or.OperationError.Code
ps.message = or.OperationError.Message
} else {
p := pt.(*provisioningStatus)
if p.hasProvisioningError() {
ps.code = p.ProvisioningError.Code
ps.message = p.ProvisioningError.Message
} else {
ps.code = "Unknown"
ps.message = "None"
}
}
}
return nil
}
func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
req := resp.Request
if req == nil {
return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
}
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
autorest.AsGet(),
autorest.WithBaseURL(ps.uri))
if err != nil {
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
}
return reqPoll, nil
}
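
// A hedged illustration of the protocol the two functions above implement: a
// caller polls the Azure-AsyncOperation (or Location) URL until a terminal
// provisioning state is reached. The terminal state strings and the payload
// shape below are assumptions for the sketch, not identifiers from this
// package; it needs only encoding/json, net/http and time.
func pollUntilTerminal(client *http.Client, uri string) (string, error) {
	for {
		resp, err := client.Get(uri)
		if err != nil {
			return "", err
		}
		var op struct {
			Status string `json:"status"` // OperationResource-style payload
		}
		err = json.NewDecoder(resp.Body).Decode(&op)
		resp.Body.Close()
		if err != nil {
			return "", err
		}
		switch op.Status {
		case "Succeeded", "Failed", "Canceled": // assumed terminal states
			return op.Status, nil
		}
		time.Sleep(30 * time.Second) // real code would honor Retry-After
	}
}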

View File

@ -1,180 +0,0 @@
/*
Package azure provides Azure-specific implementations used with AutoRest.
See the included examples for more detail.
*/
package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"github.com/Azure/go-autorest/autorest"
)
const (
// HeaderClientID is the Azure extension header to set a user-specified request ID.
HeaderClientID = "x-ms-client-request-id"
// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
// should be included in the response.
HeaderReturnClientID = "x-ms-return-client-request-id"
// HeaderRequestID is the Azure extension header of the service generated request ID returned
// in the response.
HeaderRequestID = "x-ms-request-id"
)
// ServiceError encapsulates the error response from an Azure service.
type ServiceError struct {
Code string `json:"code"`
Message string `json:"message"`
Details *[]interface{} `json:"details"`
}
func (se ServiceError) Error() string {
if se.Details != nil {
d, err := json.Marshal(*(se.Details))
if err != nil {
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
}
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
}
return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
}
// RequestError describes an error response returned by Azure service.
type RequestError struct {
autorest.DetailedError
// The error returned by the Azure service.
ServiceError *ServiceError `json:"error"`
// The request id (from the x-ms-request-id-header) of the request.
RequestID string
}
// Error returns a human-friendly error message from the service error.
func (e RequestError) Error() string {
return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
e.StatusCode, e.ServiceError)
}
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
func IsAzureError(e error) bool {
_, ok := e.(*RequestError)
return ok
}
// NewErrorWithError creates a new Error conforming object from the
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
// if resp is nil), message, and original error. message is treated as a format
// string to which the optional args apply.
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
if v, ok := original.(*RequestError); ok {
return *v
}
statusCode := autorest.UndefinedStatusCode
if resp != nil {
statusCode = resp.StatusCode
}
return RequestError{
DetailedError: autorest.DetailedError{
Original: original,
PackageType: packageType,
Method: method,
StatusCode: statusCode,
Message: fmt.Sprintf(message, args...),
},
}
}
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
// header to true so that the UUID accompanies the http.Response.
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
preparer := autorest.CreatePreparer(
WithClientID(uuid),
WithReturnClientID(true))
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err != nil {
return r, err
}
return preparer.Prepare(r)
})
}
}
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
func WithClientID(uuid string) autorest.PrepareDecorator {
return autorest.WithHeader(HeaderClientID, uuid)
}
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-return-client-request-id whose boolean value indicates if the value of the
// x-ms-client-request-id header should be included in the http.Response.
func WithReturnClientID(b bool) autorest.PrepareDecorator {
return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
}
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
// http.Request sent to the service (and returned in the http.Response)
func ExtractClientID(resp *http.Response) string {
return autorest.ExtractHeaderValue(HeaderClientID, resp)
}
// ExtractRequestID extracts the Azure server generated request identifier from the
// x-ms-request-id header.
func ExtractRequestID(resp *http.Response) string {
return autorest.ExtractHeaderValue(HeaderRequestID, resp)
}
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
// azure.RequestError by reading the response body unless the response HTTP status code
// is among the set passed.
//
// If there is a chance the service may return responses other than the Azure error
// format and the response cannot be parsed into an error, a decoding error will
// be returned containing the response body. In any case, the Responder will
// return an error if the status code is not satisfied.
//
// If this Responder returns an error, the response body will be replaced with
// an in-memory reader, which needs no further closing.
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
return func(r autorest.Responder) autorest.Responder {
return autorest.ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
var e RequestError
defer resp.Body.Close()
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
resp.Body = ioutil.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
} else if e.ServiceError == nil {
e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
}
e.RequestID = ExtractRequestID(resp)
if e.StatusCode == nil {
e.StatusCode = resp.StatusCode
}
err = &e
}
return err
})
}
}
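
// A hedged usage sketch for the responder above, assumed to live in this
// package alongside its imports; it surfaces the typed RequestError to the caller.
func respondExample(resp *http.Response) error {
	var payload map[string]interface{}
	err := autorest.Respond(resp,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&payload),
		autorest.ByClosing())
	if requestError, ok := err.(*RequestError); ok {
		// The typed error carries the parsed service error and the request ID.
		return fmt.Errorf("request %s failed: %v", requestError.RequestID, requestError.ServiceError)
	}
	return err
}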

View File

@ -1,13 +0,0 @@
package azure
import (
"net/url"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
AuthorizeEndpoint url.URL
TokenEndpoint url.URL
DeviceCodeEndpoint url.URL
}

View File

@ -1,193 +0,0 @@
package azure
/*
This file is largely based on rjw57/oauth2device's code, with the following differences:
* scope -> resource, and only allow a single one
* receive "Message" in the DeviceCode struct and show it to users as the prompt
* azure-xplat-cli has the following behavior that this emulates:
- does not send client_secret during the token exchange
- sends resource again in the token exchange request
*/
import (
"fmt"
"net/http"
"net/url"
"time"
"github.com/Azure/go-autorest/autorest"
)
const (
logPrefix = "autorest/azure/devicetoken:"
)
var (
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
errTokenSendingFails = "Error occurred while sending request with device code for a token"
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
)
// DeviceCode is the object returned by the device auth endpoint
// It contains information to instruct the user to complete the auth flow
type DeviceCode struct {
DeviceCode *string `json:"device_code,omitempty"`
UserCode *string `json:"user_code,omitempty"`
VerificationURL *string `json:"verification_url,omitempty"`
ExpiresIn *int64 `json:"expires_in,string,omitempty"`
Interval *int64 `json:"interval,string,omitempty"`
Message *string `json:"message"` // Azure specific
Resource string // The following fields are stored when the flow is initiated and reused during the token exchange.
OAuthConfig OAuthConfig
ClientID string
}
// TokenError is the object returned by the token exchange endpoint
// when something is amiss
type TokenError struct {
Error *string `json:"error,omitempty"`
ErrorCodes []int `json:"error_codes,omitempty"`
ErrorDescription *string `json:"error_description,omitempty"`
Timestamp *string `json:"timestamp,omitempty"`
TraceID *string `json:"trace_id,omitempty"`
}
// deviceToken is the object returned by the token exchange endpoint.
// It can either look like a Token or a TokenError, so both are embedded here;
// check for the presence of "Error" to know if we are in an error state.
type deviceToken struct {
Token
TokenError
}
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
req, _ := autorest.Prepare(
&http.Request{},
autorest.AsPost(),
autorest.AsFormURLEncoded(),
autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()),
autorest.WithFormData(url.Values{
"client_id": []string{clientID},
"resource": []string{resource},
}),
)
resp, err := autorest.SendWithSender(client, req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err)
}
var code DeviceCode
err = autorest.Respond(
resp,
autorest.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&code),
autorest.ByClosing())
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err)
}
code.ClientID = clientID
code.Resource = resource
code.OAuthConfig = oauthConfig
return &code, nil
}
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has been completed, timed out, or otherwise failed.
func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
req, _ := autorest.Prepare(
&http.Request{},
autorest.AsPost(),
autorest.AsFormURLEncoded(),
autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()),
autorest.WithFormData(url.Values{
"client_id": []string{code.ClientID},
"code": []string{*code.DeviceCode},
"grant_type": []string{OAuthGrantTypeDeviceCode},
"resource": []string{code.Resource},
}),
)
resp, err := autorest.SendWithSender(client, req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err)
}
var token deviceToken
err = autorest.Respond(
resp,
autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest),
autorest.ByUnmarshallingJSON(&token),
autorest.ByClosing())
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err)
}
if token.Error == nil {
return &token.Token, nil
}
switch *token.Error {
case "authorization_pending":
return nil, ErrDeviceAuthorizationPending
case "slow_down":
return nil, ErrDeviceSlowDown
case "access_denied":
return nil, ErrDeviceAccessDenied
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
return nil, ErrDeviceGeneric
}
}
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
intervalDuration := time.Duration(*code.Interval) * time.Second
waitDuration := intervalDuration
for {
token, err := CheckForUserCompletion(client, code)
if err == nil {
return token, nil
}
switch err {
case ErrDeviceSlowDown:
waitDuration += waitDuration
case ErrDeviceAuthorizationPending:
// noop
default: // everything else is "fatal" to us
return nil, err
}
if waitDuration > (intervalDuration * 3) {
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
}
time.Sleep(waitDuration)
}
}
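
// A hedged end-to-end sketch of the flow above; the tenant, client ID and
// resource are placeholders. OAuthConfigForTenant comes from this package's
// environment support.
func deviceFlowExample() (*Token, error) {
	oauthConfig, err := PublicCloud.OAuthConfigForTenant("common") // placeholder tenant
	if err != nil {
		return nil, err
	}
	client := &autorest.Client{}
	code, err := InitiateDeviceAuth(client, *oauthConfig,
		"00000000-0000-0000-0000-000000000000", // placeholder client ID
		"https://management.azure.com/")
	if err != nil {
		return nil, err
	}
	fmt.Println(*code.Message) // instructs the user to visit the verification URL
	return WaitForUserCompletion(client, code)
}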

View File

@ -1,167 +0,0 @@
package azure
import (
"fmt"
"net/url"
"strings"
)
const (
activeDirectoryAPIVersion = "1.0"
)
var environments = map[string]Environment{
"AZURECHINACLOUD": ChinaCloud,
"AZUREGERMANCLOUD": GermanCloud,
"AZUREPUBLICCLOUD": PublicCloud,
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
}
// Environment represents a set of endpoints for each of Azure's Clouds.
type Environment struct {
Name string `json:"name"`
ManagementPortalURL string `json:"managementPortalURL"`
PublishSettingsURL string `json:"publishSettingsURL"`
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
GalleryEndpoint string `json:"galleryEndpoint"`
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
GraphEndpoint string `json:"graphEndpoint"`
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
}
var (
// PublicCloud is the default public Azure cloud environment
PublicCloud = Environment{
Name: "AzurePublicCloud",
ManagementPortalURL: "https://manage.windowsazure.com/",
PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.windows.net/",
ResourceManagerEndpoint: "https://management.azure.com/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
GalleryEndpoint: "https://gallery.azure.com/",
KeyVaultEndpoint: "https://vault.azure.net/",
GraphEndpoint: "https://graph.windows.net/",
StorageEndpointSuffix: "core.windows.net",
SQLDatabaseDNSSuffix: "database.windows.net",
TrafficManagerDNSSuffix: "trafficmanager.net",
KeyVaultDNSSuffix: "vault.azure.net",
ServiceBusEndpointSuffix: "servicebus.azure.com",
ServiceManagementVMDNSSuffix: "cloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// USGovernmentCloud is the cloud environment for the US Government
USGovernmentCloud = Environment{
Name: "AzureUSGovernmentCloud",
ManagementPortalURL: "https://manage.windowsazure.us/",
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
GraphEndpoint: "https://graph.usgovcloudapi.net/",
StorageEndpointSuffix: "core.usgovcloudapi.net",
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// ChinaCloud is the cloud environment operated in China
ChinaCloud = Environment{
Name: "AzureChinaCloud",
ManagementPortalURL: "https://manage.chinacloudapi.com/",
PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
KeyVaultEndpoint: "https://vault.azure.cn/",
GraphEndpoint: "https://graph.chinacloudapi.cn/",
StorageEndpointSuffix: "core.chinacloudapi.cn",
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
TrafficManagerDNSSuffix: "trafficmanager.cn",
KeyVaultDNSSuffix: "vault.azure.cn",
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// GermanCloud is the cloud environment operated in Germany
GermanCloud = Environment{
Name: "AzureGermanCloud",
ManagementPortalURL: "http://portal.microsoftazure.de/",
PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
ResourceManagerEndpoint: "https://management.microsoftazure.de/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
GalleryEndpoint: "https://gallery.cloudapi.de/",
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
GraphEndpoint: "https://graph.cloudapi.de/",
StorageEndpointSuffix: "core.cloudapi.de",
SQLDatabaseDNSSuffix: "database.cloudapi.de",
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
KeyVaultDNSSuffix: "vault.microsoftazure.de",
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
ContainerRegistryDNSSuffix: "azurecr.io",
}
)
// EnvironmentFromName returns an Environment based on the common name specified
func EnvironmentFromName(name string) (Environment, error) {
name = strings.ToUpper(name)
env, ok := environments[name]
if !ok {
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
}
return env, nil
}
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls
func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {
return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID)
}
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint
func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
template := "%s/oauth2/%s?api-version=%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
return &OAuthConfig{
AuthorizeEndpoint: *authorizeURL,
TokenEndpoint: *tokenURL,
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}
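
// A hedged lookup sketch for the helpers above; the tenant ID is a placeholder.
func environmentExample() error {
	env, err := EnvironmentFromName("AzureChinaCloud") // lookup is case-insensitive
	if err != nil {
		return err
	}
	oauthConfig, err := env.OAuthConfigForTenant("my-tenant-id")
	if err != nil {
		return err
	}
	fmt.Println(env.ResourceManagerEndpoint, oauthConfig.TokenEndpoint.String())
	return nil
}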

View File

@ -1,59 +0,0 @@
package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// LoadToken restores a Token object from a file located at 'path'.
func LoadToken(path string) (*Token, error) {
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
}
defer file.Close()
var token Token
dec := json.NewDecoder(file)
if err = dec.Decode(&token); err != nil {
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
}
return &token, nil
}
// SaveToken persists an oauth token at the given location on disk.
// It moves the new file into place so it can safely be used to replace an existing file
// that may be accessed by multiple processes.
func SaveToken(path string, mode os.FileMode, token Token) error {
dir := filepath.Dir(path)
err := os.MkdirAll(dir, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
}
newFile, err := ioutil.TempFile(dir, "token")
if err != nil {
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
}
tempPath := newFile.Name()
if err := json.NewEncoder(newFile).Encode(token); err != nil {
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
}
if err := newFile.Close(); err != nil {
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
}
// Atomic replace to avoid multi-writer file corruptions
if err := os.Rename(tempPath, path); err != nil {
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
}
if err := os.Chmod(path, mode); err != nil {
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
}
return nil
}
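
// A hedged round-trip sketch for the helpers above; the path and token values are dummies.
func persistExample() error {
	path := filepath.Join(os.TempDir(), "example-token.json")
	if err := SaveToken(path, 0600, Token{AccessToken: "dummy"}); err != nil {
		return err
	}
	token, err := LoadToken(path)
	if err != nil {
		return err
	}
	fmt.Println(token.AccessToken) // "dummy"
	return nil
}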

View File

@ -1,363 +0,0 @@
package azure
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/dgrijalva/jwt-go"
)
const (
defaultRefresh = 5 * time.Minute
tokenBaseDate = "1970-01-01T00:00:00Z"
// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
OAuthGrantTypeDeviceCode = "device_code"
// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
OAuthGrantTypeClientCredentials = "client_credentials"
// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
OAuthGrantTypeRefreshToken = "refresh_token"
)
var expirationBase time.Time
func init() {
expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate)
}
// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh
type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn)
if err != nil {
s = -3600
}
return expirationBase.Add(time.Duration(s) * time.Second).UTC()
}
// IsExpired returns true if the Token is expired, false otherwise.
func (t Token) IsExpired() bool {
return t.WillExpireIn(0)
}
// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
// from now, false otherwise.
func (t Token) WillExpireIn(d time.Duration) bool {
return !t.Expires().After(time.Now().Add(d))
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the AccessToken of the Token.
func (t *Token) WithAuthorization() autorest.PrepareDecorator {
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r)
})
}
}
// ServicePrincipalNoSecret represents a secret type that contains no secret,
// meaning it is not valid for fetching a fresh token. This is used by tokens
// created manually via NewServicePrincipalTokenFromManualToken.
type ServicePrincipalNoSecret struct {
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// For ServicePrincipalNoSecret it always returns an error, since no secret material exists to fetch a token.
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
}
// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
// that is submitted when acquiring an oAuth token.
type ServicePrincipalSecret interface {
SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}
// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
type ServicePrincipalTokenSecret struct {
ClientSecret string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("client_secret", tokenSecret.ClientSecret)
return nil
}
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
type ServicePrincipalCertificateSecret struct {
Certificate *x509.Certificate
PrivateKey *rsa.PrivateKey
}
// SignJwt returns the JWT signed with the certificate's private key.
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
hasher := sha1.New()
_, err := hasher.Write(secret.Certificate.Raw)
if err != nil {
return "", err
}
thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
// The jti (JWT ID) claim provides a unique identifier for the JWT.
jti := make([]byte, 20)
_, err = rand.Read(jti)
if err != nil {
return "", err
}
token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint
token.Claims = jwt.MapClaims{
"aud": spt.oauthConfig.TokenEndpoint.String(),
"iss": spt.clientID,
"sub": spt.clientID,
"jti": base64.URLEncoding.EncodeToString(jti),
"nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour * 24).Unix(),
}
signedString, err := token.SignedString(secret.PrivateKey)
return signedString, err
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
jwt, err := secret.SignJwt(spt)
if err != nil {
return err
}
v.Set("client_assertion", jwt)
v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
return nil
}
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
Token
secret ServicePrincipalSecret
oauthConfig OAuthConfig
clientID string
resource string
autoRefresh bool
refreshWithin time.Duration
sender autorest.Sender
refreshCallbacks []TokenRefreshCallback
}
// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
spt := &ServicePrincipalToken{
oauthConfig: oauthConfig,
secret: secret,
clientID: id,
resource: resource,
autoRefresh: true,
refreshWithin: defaultRefresh,
sender: &http.Client{},
refreshCallbacks: callbacks,
}
return spt, nil
}
// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
spt, err := NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalNoSecret{},
callbacks...)
if err != nil {
return nil, err
}
spt.Token = token
return spt, nil
}
// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
// credentials scoped to the named resource.
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalTokenSecret{
ClientSecret: secret,
},
callbacks...,
)
}
// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and private key.
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
callbacks...,
)
}
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// SetRefreshWithin).
func (spt *ServicePrincipalToken) EnsureFresh() error {
if spt.WillExpireIn(spt.refreshWithin) {
return spt.Refresh()
}
return nil
}
// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
if spt.refreshCallbacks != nil {
for _, callback := range spt.refreshCallbacks {
err := callback(spt.Token)
if err != nil {
return autorest.NewErrorWithError(err,
"azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error")
}
}
}
return nil
}
// Refresh obtains a fresh token for the Service Principal.
func (spt *ServicePrincipalToken) Refresh() error {
return spt.refreshInternal(spt.resource)
}
// RefreshExchange refreshes the token, but for a different resource.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
return spt.refreshInternal(resource)
}
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
v := url.Values{}
v.Set("client_id", spt.clientID)
v.Set("resource", resource)
if spt.RefreshToken != "" {
v.Set("grant_type", OAuthGrantTypeRefreshToken)
v.Set("refresh_token", spt.RefreshToken)
} else {
v.Set("grant_type", OAuthGrantTypeClientCredentials)
err := spt.secret.SetAuthenticationValues(spt, &v)
if err != nil {
return err
}
}
req, _ := autorest.Prepare(&http.Request{},
autorest.AsPost(),
autorest.AsFormURLEncoded(),
autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()),
autorest.WithFormData(v))
resp, err := autorest.SendWithSender(spt.sender, req)
if err != nil {
return autorest.NewErrorWithError(err,
"azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s",
spt.clientID)
}
var newToken Token
err = autorest.Respond(resp,
autorest.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&newToken),
autorest.ByClosing())
if err != nil {
return autorest.NewErrorWithError(err,
"azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request",
spt.clientID)
}
spt.Token = newToken
err = spt.InvokeRefreshCallbacks(newToken)
if err != nil {
// it's already wrapped inside InvokeRefreshCallbacks
return err
}
return nil
}
// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
spt.autoRefresh = autoRefresh
}
// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh
// will refresh the token.
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
spt.refreshWithin = d
}
// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An
// undecorated http.Client is used by default.
func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) {
spt.sender = s
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken.
//
// By default, the token will automatically refresh if nearly expired (as determined by the
// RefreshWithin interval). Use the SetAutoRefresh method to enable or disable automatically refreshing
// tokens.
func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator {
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
if spt.autoRefresh {
err := spt.EnsureFresh()
if err != nil {
return r, autorest.NewErrorWithError(err,
"azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s",
r.URL)
}
}
return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r)
})
}
}
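
// A hedged client-credential sketch for the flow above; the tenant, client ID,
// secret, resource and target URL are placeholders.
func servicePrincipalExample() (*http.Request, error) {
	oauthConfig, err := PublicCloud.OAuthConfigForTenant("my-tenant-id")
	if err != nil {
		return nil, err
	}
	spt, err := NewServicePrincipalToken(*oauthConfig,
		"my-client-id", "my-client-secret", "https://management.azure.com/")
	if err != nil {
		return nil, err
	}
	// WithAuthorization refreshes the token if needed, then injects the
	// Bearer header into the prepared request.
	return autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/subscriptions"), // placeholder URL
		spt.WithAuthorization())
}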

View File

@ -1,235 +0,0 @@
package autorest
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/cookiejar"
"runtime"
"time"
)
const (
// DefaultPollingDelay is a reasonable delay between polling requests.
DefaultPollingDelay = 60 * time.Second
// DefaultPollingDuration is a reasonable total polling duration.
DefaultPollingDuration = 15 * time.Minute
// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
DefaultRetryAttempts = 3
)
var (
// defaultUserAgent builds a string containing the Go version, system architecture and OS,
// and the go-autorest version.
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
Version(),
)
statusCodesForRetry = []int{
http.StatusRequestTimeout, // 408
http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
}
)
const (
requestFormat = `HTTP Request Begin ===================================================
%s
===================================================== HTTP Request End
`
responseFormat = `HTTP Response Begin ===================================================
%s
===================================================== HTTP Response End
`
)
// Response serves as the base for all responses from generated clients. It provides access to the
// last http.Response.
type Response struct {
*http.Response `json:"-"`
}
// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log.
type LoggingInspector struct {
Logger *log.Logger
}
// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
// body is restored after being emitted.
//
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
// important. It is best used to trace JSON or similar body values.
func (li LoggingInspector) WithInspection() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
var body, b bytes.Buffer
defer r.Body.Close()
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
if err := r.Write(&b); err != nil {
return nil, fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(requestFormat, b.String())
r.Body = ioutil.NopCloser(&body)
return p.Prepare(r)
})
}
}
// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
// body is restored after being emitted.
//
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
// important. It is best used to trace JSON or similar body values.
func (li LoggingInspector) ByInspecting() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
var body, b bytes.Buffer
defer resp.Body.Close()
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
if err := resp.Write(&b); err != nil {
return fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(responseFormat, b.String())
resp.Body = ioutil.NopCloser(&body)
return r.Respond(resp)
})
}
}
// Client is the base for autorest generated clients. It provides default, "do nothing"
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
// standard, undecorated http.Client as a default Sender.
//
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
// return responses that compose with Response.
//
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
// sending the request by providing a decorated Sender.
type Client struct {
Authorizer Authorizer
Sender Sender
RequestInspector PrepareDecorator
ResponseInspector RespondDecorator
// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned.
PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for the client.
RetryAttempts int
// RetryDuration sets the delay duration for retries.
RetryDuration time.Duration
// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
// through the Do method.
UserAgent string
Jar http.CookieJar
}
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
// string.
func NewClientWithUserAgent(ua string) Client {
c := Client{
PollingDelay: DefaultPollingDelay,
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: 30 * time.Second,
UserAgent: defaultUserAgent,
}
c.AddToUserAgent(ua)
return c
}
// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
if extension != "" {
c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
return nil
}
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
}
// Do implements the Sender interface by invoking the active Sender after applying authorization.
// If Sender is not set, it uses a new instance of http.Client. In both cases, if UserAgent
// is set, it will set the User-Agent header.
func (c Client) Do(r *http.Request) (*http.Response, error) {
if r.UserAgent() == "" {
r, _ = Prepare(r,
WithUserAgent(c.UserAgent))
}
r, err := Prepare(r,
c.WithInspection(),
c.WithAuthorization())
if err != nil {
return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
}
resp, err := SendWithSender(c.sender(), r,
DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...))
Respond(resp,
c.ByInspecting())
return resp, err
}
// sender returns the Sender to which to send requests.
func (c Client) sender() Sender {
if c.Sender == nil {
j, _ := cookiejar.New(nil)
return &http.Client{Jar: j}
}
return c.Sender
}
// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
func (c Client) WithAuthorization() PrepareDecorator {
return c.authorizer().WithAuthorization()
}
// authorizer returns the Authorizer to use.
func (c Client) authorizer() Authorizer {
if c.Authorizer == nil {
return NullAuthorizer{}
}
return c.Authorizer
}
// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
// if present, or returns the WithNothing PrepareDecorator otherwise.
func (c Client) WithInspection() PrepareDecorator {
if c.RequestInspector == nil {
return WithNothing()
}
return c.RequestInspector
}
// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
// if present, or returns the ByIgnoring RespondDecorator otherwise.
func (c Client) ByInspecting() RespondDecorator {
if c.ResponseInspector == nil {
return ByIgnoring()
}
return c.ResponseInspector
}
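
// A hedged usage sketch for Client; the user agent and URL are placeholders.
func clientExample() (*http.Response, error) {
	c := NewClientWithUserAgent("example-app/1.0")
	req, err := Prepare(&http.Request{}, AsGet(), WithBaseURL("https://example.com/"))
	if err != nil {
		return nil, err
	}
	return c.Do(req) // Do retries the 5xx codes listed in statusCodesForRetry
}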

View File

@ -1,82 +0,0 @@
/*
Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
time.Time types. Both convert to time.Time through a ToTime method.
*/
package date
import (
"fmt"
"time"
)
const (
fullDate = "2006-01-02"
fullDateJSON = `"2006-01-02"`
dateFormat = "%04d-%02d-%02d"
jsonFormat = `"%04d-%02d-%02d"`
)
// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
// 2006-01-02).
type Date struct {
time.Time
}
// ParseDate creates a new Date from the passed string.
func ParseDate(date string) (d Date, err error) {
return parseDate(date, fullDate)
}
func parseDate(date string, format string) (Date, error) {
d, err := time.Parse(format, date)
return Date{Time: d}, err
}
// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalBinary() ([]byte, error) {
return d.MarshalText()
}
// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalBinary(data []byte) error {
return d.UnmarshalText(data)
}
// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalJSON() (json []byte, err error) {
return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
}
// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalJSON(data []byte) (err error) {
d.Time, err = time.Parse(fullDateJSON, string(data))
return err
}
// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalText() (text []byte, err error) {
return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
}
// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalText(data []byte) (err error) {
d.Time, err = time.Parse(fullDate, string(data))
return err
}
// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
func (d Date) String() string {
return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
}
// ToTime returns a Date as a time.Time
func (d Date) ToTime() time.Time {
return d.Time
}
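
// A hedged round-trip sketch for the Date type above.
func dateExample() error {
	d, err := ParseDate("2018-07-25")
	if err != nil {
		return err
	}
	b, _ := d.MarshalJSON()
	fmt.Println(string(b)) // "2018-07-25"
	var parsed Date
	if err := parsed.UnmarshalJSON(b); err != nil {
		return err
	}
	fmt.Println(parsed.Equal(d.ToTime())) // true
	return nil
}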

View File

@ -1,89 +0,0 @@
package date
import (
"regexp"
"time"
)
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
const (
azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
azureUtcFormat = "2006-01-02T15:04:05.999999999"
rfc3339JSON = `"` + time.RFC3339Nano + `"`
rfc3339 = time.RFC3339Nano
tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$`
)
// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
type Time struct {
time.Time
}
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalBinary() ([]byte, error) {
return t.Time.MarshalText()
}
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalBinary(data []byte) error {
return t.UnmarshalText(data)
}
// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalJSON() (json []byte, err error) {
return t.Time.MarshalJSON()
}
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalJSON(data []byte) (err error) {
timeFormat := azureUtcFormatJSON
match, err := regexp.Match(tzOffsetRegex, data)
if err != nil {
return err
} else if match {
timeFormat = rfc3339JSON
}
t.Time, err = ParseTime(timeFormat, string(data))
return err
}
// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalText() (text []byte, err error) {
return t.Time.MarshalText()
}
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalText(data []byte) (err error) {
timeFormat := azureUtcFormat
match, err := regexp.Match(tzOffsetRegex, data)
if err != nil {
return err
} else if match {
timeFormat = rfc3339
}
t.Time, err = ParseTime(timeFormat, string(data))
return err
}
// String returns the Time formatted as an RFC3339 date-time string (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) String() string {
// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
b, err := t.MarshalText()
if err != nil {
return ""
}
return string(b)
}
// ToTime returns a Time as a time.Time
func (t Time) ToTime() time.Time {
return t.Time
}
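
// A hedged sketch of why the fallback format matters: timestamps with and
// without the trailing 'Z' must decode to the same UTC instant (fmt is
// assumed to be imported for the sketch).
func timeExample() error {
	var withZone, withoutZone Time
	if err := withZone.UnmarshalJSON([]byte(`"2018-07-25T14:51:24Z"`)); err != nil {
		return err
	}
	if err := withoutZone.UnmarshalJSON([]byte(`"2018-07-25T14:51:24"`)); err != nil {
		return err
	}
	fmt.Println(withZone.ToTime().Equal(withoutZone.ToTime())) // true
	return nil
}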

View File

@ -1,86 +0,0 @@
package date
import (
"errors"
"time"
)
const (
rfc1123JSON = `"` + time.RFC1123 + `"`
rfc1123 = time.RFC1123
)
// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
type TimeRFC1123 struct {
time.Time
}
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
t.Time, err = ParseTime(rfc1123JSON, string(data))
if err != nil {
return err
}
return nil
}
// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
if y := t.Year(); y < 0 || y >= 10000 {
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
}
b := []byte(t.Format(rfc1123JSON))
return b, nil
}
// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalText() ([]byte, error) {
if y := t.Year(); y < 0 || y >= 10000 {
return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
}
b := []byte(t.Format(rfc1123))
return b, nil
}
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
t.Time, err = ParseTime(rfc1123, string(data))
if err != nil {
return err
}
return nil
}
// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
return t.MarshalText()
}
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
return t.UnmarshalText(data)
}
// ToTime returns a Time as a time.Time
func (t TimeRFC1123) ToTime() time.Time {
return t.Time
}
// String returns the Time formatted as an RFC1123 date-time string (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) String() string {
// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
b, err := t.MarshalText()
if err != nil {
return ""
}
return string(b)
}
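
// A hedged sketch decoding the RFC1123 timestamps Azure returns in headers
// such as Last-Modified (fmt is assumed to be imported for the sketch).
func timeRFC1123Example() error {
	var t TimeRFC1123
	if err := t.UnmarshalText([]byte("Wed, 25 Jul 2018 14:51:24 GMT")); err != nil {
		return err
	}
	fmt.Println(t.ToTime().UTC()) // 2018-07-25 14:51:24 +0000 UTC
	return nil
}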

View File

@ -1,11 +0,0 @@
package date
import (
"strings"
"time"
)
// ParseTime parses a time string using the specified format.
func ParseTime(format string, t string) (d time.Time, err error) {
return time.Parse(format, strings.ToUpper(t))
}

View File

@ -1,80 +0,0 @@
package autorest
import (
"fmt"
"net/http"
)
const (
// UndefinedStatusCode is used when HTTP status code is not available for an error.
UndefinedStatusCode = 0
)
// DetailedError encloses a error with details of the package, method, and associated HTTP
// status code (if any).
type DetailedError struct {
Original error
// PackageType is the package type of the object emitting the error. For types, the value
// matches that produced by the '%T' format specifier of the fmt package. For other elements,
// such as functions, it is just the package name (e.g., "autorest").
PackageType string
// Method is the name of the method raising the error.
Method string
// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
StatusCode interface{}
// Message is the error message.
Message string
// ServiceError is the response body of the failed API call, in bytes
ServiceError []byte
}
// NewError creates a new Error conforming object from the passed packageType, method, and
// message. message is treated as a format string to which the optional args apply.
func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
return NewErrorWithError(nil, packageType, method, nil, message, args...)
}
// NewErrorWithResponse creates a new Error conforming object from the passed
// packageType, method, statusCode of the given resp (UndefinedStatusCode if
// resp is nil), and message. message is treated as a format string to which the
// optional args apply.
func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
return NewErrorWithError(nil, packageType, method, resp, message, args...)
}
// NewErrorWithError creates a new Error conforming object from the
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
// if resp is nil), message, and original error. message is treated as a format
// string to which the optional args apply.
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
if v, ok := original.(DetailedError); ok {
return v
}
statusCode := UndefinedStatusCode
if resp != nil {
statusCode = resp.StatusCode
}
return DetailedError{
Original: original,
PackageType: packageType,
Method: method,
StatusCode: statusCode,
Message: fmt.Sprintf(message, args...),
}
}
// Error returns a formatted string containing all available details (i.e., PackageType, Method,
// StatusCode, Message, and original error (if any)).
func (e DetailedError) Error() string {
if e.Original == nil {
return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
}
return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
}
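
// A hedged wrapping sketch for the helpers above; the package type, method
// and message are placeholders.
func errorExample(original error, resp *http.Response) error {
	if original == nil {
		return nil
	}
	// Produces "examplePkg#Get: listing resources failed: StatusCode=... -- Original Error: ..."
	return NewErrorWithError(original, "examplePkg", "Get", resp, "listing resources failed")
}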

View File

@ -1,443 +0,0 @@
package autorest
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"strings"
)
const (
mimeTypeJSON = "application/json"
mimeTypeFormPost = "application/x-www-form-urlencoded"
headerAuthorization = "Authorization"
headerContentType = "Content-Type"
headerUserAgent = "User-Agent"
)
// Preparer is the interface that wraps the Prepare method.
//
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
type Preparer interface {
Prepare(*http.Request) (*http.Request, error)
}
// PreparerFunc is a method that implements the Preparer interface.
type PreparerFunc func(*http.Request) (*http.Request, error)
// Prepare implements the Preparer interface on PreparerFunc.
func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
return pf(r)
}
// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then affect the result.
type PrepareDecorator func(Preparer) Preparer
// CreatePreparer creates, decorates, and returns a Preparer.
// Without decorators, the returned Preparer returns the passed http.Request unmodified.
// Preparers are safe to share and re-use.
func CreatePreparer(decorators ...PrepareDecorator) Preparer {
return DecoratePreparer(
Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
decorators...)
}
// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
// request depends on whether they are a pre-decorator (change the http.Request and then pass it
// along) or a post-decorator (pass the http.Request along and alter it on return).
func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
for _, decorate := range decorators {
p = decorate(p)
}
return p
}
// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
// It creates a Preparer from the decorators which it then applies to the passed http.Request.
func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
if r == nil {
return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
}
return CreatePreparer(decorators...).Prepare(r)
}
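
// A hedged sketch of building one reusable Preparer from the decorators
// defined below; the base URL and header are placeholders.
var examplePreparer = CreatePreparer(
	AsGet(),
	AsJSON(),
	WithBaseURL("https://example.com/api"),
	WithHeader("x-example", "true"))

// The same Preparer may safely prepare any number of requests.
func preparerExample() (*http.Request, error) {
	return examplePreparer.Prepare(&http.Request{})
}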
// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
// http.Request.
func WithNothing() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
return p.Prepare(r)
})
}
}
// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
// adding the header.
func WithHeader(header string, value string) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.Header == nil {
r.Header = make(http.Header)
}
r.Header.Set(http.CanonicalHeaderKey(header), value)
}
return r, err
})
}
}
// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the supplied token.
func WithBearerAuthorization(token string) PrepareDecorator {
return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
}
// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
// is the passed contentType.
func AsContentType(contentType string) PrepareDecorator {
return WithHeader(headerContentType, contentType)
}
// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
// passed string.
func WithUserAgent(ua string) PrepareDecorator {
return WithHeader(headerUserAgent, ua)
}
// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
// "application/x-www-form-urlencoded".
func AsFormURLEncoded() PrepareDecorator {
return AsContentType(mimeTypeFormPost)
}
// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
// "application/json".
func AsJSON() PrepareDecorator {
return AsContentType(mimeTypeJSON)
}
// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
// decorator does not validate that the passed method string is a known HTTP method.
func WithMethod(method string) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r.Method = method
return p.Prepare(r)
})
}
}
// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
func AsGet() PrepareDecorator { return WithMethod("GET") }
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
func AsPost() PrepareDecorator { return WithMethod("POST") }
// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
func AsPut() PrepareDecorator { return WithMethod("PUT") }
// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
// from the supplied baseURL.
func WithBaseURL(baseURL string) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
var u *url.URL
if u, err = url.Parse(baseURL); err != nil {
return r, err
}
if u.Scheme == "" {
err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
}
if err == nil {
r.URL = u
}
}
return r, err
})
}
}
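// exampleGetPreparer is an illustrative sketch, not part of the original
// library: it shows the decorators above composing into a single GET request,
// assuming the package-level Prepare helper defined earlier in this file.
func exampleGetPreparer() (*http.Request, error) {
	// Decorators run in list order here: the method and base URL are set
	// first, then the Accept header is added to the prepared request.
	return Prepare(&http.Request{},
		AsGet(),
		WithBaseURL("https://example.com"),
		WithHeader("Accept", "application/json"))
}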
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
parameters := ensureValueStrings(urlParameters)
for key, value := range parameters {
baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
}
return WithBaseURL(baseURL)
}
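// exampleCustomBase is an illustrative sketch, not part of the original
// library: the brace-enclosed {account} key is replaced before the URL is
// parsed, yielding https://mystore.blob.core.windows.net.
func exampleCustomBase() PrepareDecorator {
	return WithCustomBaseURL("https://{account}.blob.core.windows.net",
		map[string]interface{}{"account": "mystore"})
}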
// WithFormData returns a PrepareDecorator that URL encodes (e.g., bar=baz&foo=quux) the passed
// url.Values into the http.Request body.
func WithFormData(v url.Values) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
s := v.Encode()
r.ContentLength = int64(len(s))
r.Body = ioutil.NopCloser(strings.NewReader(s))
}
return r, err
})
}
}
// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters into
// the http.Request body as multipart/form-data, using a file part for each io.ReadCloser value
// and a plain form field for everything else.
func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
var body bytes.Buffer
writer := multipart.NewWriter(&body)
for key, value := range formDataParameters {
if rc, ok := value.(io.ReadCloser); ok {
var fd io.Writer
if fd, err = writer.CreateFormFile(key, key); err != nil {
return r, err
}
if _, err = io.Copy(fd, rc); err != nil {
return r, err
}
} else {
if err = writer.WriteField(key, ensureValueString(value)); err != nil {
return r, err
}
}
}
if err = writer.Close(); err != nil {
return r, err
}
if r.Header == nil {
r.Header = make(http.Header)
}
r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r.ContentLength = int64(body.Len())
return r, err
}
return r, err
})
}
}
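// exampleMultipart is an illustrative sketch, not part of the original
// library, assuming the package-level Prepare helper from earlier in this
// file: io.ReadCloser values become file parts, everything else a plain field.
func exampleMultipart(artifact io.ReadCloser) (*http.Request, error) {
	return Prepare(&http.Request{},
		AsPost(),
		WithBaseURL("https://example.com/upload"),
		WithMultiPartFormData(map[string]interface{}{
			"description": "nightly build", // written via writer.WriteField
			"artifact":    artifact,        // streamed via writer.CreateFormFile
		}))
}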
// WithFile returns a PrepareDecorator that sends the contents of the passed file in the request body.
func WithFile(f io.ReadCloser) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
b, err := ioutil.ReadAll(f)
if err != nil {
return r, err
}
r.Body = ioutil.NopCloser(bytes.NewReader(b))
r.ContentLength = int64(len(b))
}
return r, err
})
}
}
// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
// and sets the Content-Length header.
func WithBool(v bool) PrepareDecorator {
return WithString(fmt.Sprintf("%v", v))
}
// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
// request and sets the Content-Length header.
func WithFloat32(v float32) PrepareDecorator {
return WithString(fmt.Sprintf("%v", v))
}
// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
// request and sets the Content-Length header.
func WithFloat64(v float64) PrepareDecorator {
return WithString(fmt.Sprintf("%v", v))
}
// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
// and sets the Content-Length header.
func WithInt32(v int32) PrepareDecorator {
return WithString(fmt.Sprintf("%v", v))
}
// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
// and sets the Content-Length header.
func WithInt64(v int64) PrepareDecorator {
return WithString(fmt.Sprintf("%v", v))
}
// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
// and sets the Content-Length header.
func WithString(v string) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
r.ContentLength = int64(len(v))
r.Body = ioutil.NopCloser(strings.NewReader(v))
}
return r, err
})
}
}
// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
// request and sets the Content-Length header.
func WithJSON(v interface{}) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
			if err == nil {
				// Assign to the outer err so a marshalling failure propagates
				// to the caller instead of being swallowed by a shadowed err.
				var b []byte
				b, err = json.Marshal(v)
				if err == nil {
					r.ContentLength = int64(len(b))
					r.Body = ioutil.NopCloser(bytes.NewReader(b))
				}
			}
return r, err
})
}
}
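// exampleJSONBody is an illustrative sketch, not part of the original
// library, assuming the package-level Prepare helper from earlier in this
// file: AsJSON sets the Content-Type header while WithJSON marshals the value
// into the body and sets Content-Length.
func exampleJSONBody() (*http.Request, error) {
	return Prepare(&http.Request{},
		AsPut(),
		WithBaseURL("https://example.com/items/1"),
		AsJSON(),
		WithJSON(map[string]string{"name": "example"}))
}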
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
// is absolute (that is, it begins with a "/"), it replaces the existing path.
func WithPath(path string) PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.URL == nil {
return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
}
if r.URL, err = parseURL(r.URL, path); err != nil {
return r, err
}
}
return r, err
})
}
}
// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
// values will be escaped (aka URL encoded) before insertion into the path.
func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
parameters := escapeValueStrings(ensureValueStrings(pathParameters))
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.URL == nil {
return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
}
for key, value := range parameters {
path = strings.Replace(path, "{"+key+"}", value, -1)
}
if r.URL, err = parseURL(r.URL, path); err != nil {
return r, err
}
}
return r, err
})
}
}
// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
parameters := ensureValueStrings(pathParameters)
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.URL == nil {
return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
}
for key, value := range parameters {
path = strings.Replace(path, "{"+key+"}", value, -1)
}
if r.URL, err = parseURL(r.URL, path); err != nil {
return r, err
}
}
return r, err
})
}
}
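// examplePathParams is an illustrative sketch, not part of the original
// library, assuming the package-level Prepare helper from earlier in this
// file: {itemID} is substituted before the path is joined onto the base URL,
// producing https://example.com/items/42.
func examplePathParams() (*http.Request, error) {
	return Prepare(&http.Request{},
		AsGet(),
		WithBaseURL("https://example.com"),
		WithPathParameters("/items/{itemID}",
			map[string]interface{}{"itemID": 42}))
}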
// parseURL joins the supplied path onto the given URL, ensuring a single
// slash separates the two.
func parseURL(u *url.URL, path string) (*url.URL, error) {
p := strings.TrimRight(u.String(), "/")
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
return url.Parse(p + path)
}
// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
// given in the supplied map (i.e., key=value).
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
parameters := ensureValueStrings(queryParameters)
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
if r.URL == nil {
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
}
v := r.URL.Query()
for key, value := range parameters {
v.Add(key, value)
}
r.URL.RawQuery = createQuery(v)
}
return r, err
})
}
}
// Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request.
type Authorizer interface {
WithAuthorization() PrepareDecorator
}
// NullAuthorizer implements a default, "do nothing" Authorizer.
type NullAuthorizer struct{}
// WithAuthorization returns a PrepareDecorator that does nothing.
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
return WithNothing()
}
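// exampleBearerAuthorizer is an illustrative sketch, not part of the original
// library: a minimal Authorizer that decorates every request with a static
// bearer token via WithBearerAuthorization.
type exampleBearerAuthorizer struct {
	token string
}

// WithAuthorization returns a PrepareDecorator carrying the stored token.
func (a exampleBearerAuthorizer) WithAuthorization() PrepareDecorator {
	return WithBearerAuthorization(a.token)
}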

View File

@ -1,236 +0,0 @@
package autorest
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
// Responder is the interface that wraps the Respond method.
//
// Respond accepts and reacts to an http.Response. Implementations must not share or hold state
// state since Responders may be shared and re-used.
type Responder interface {
Respond(*http.Response) error
}
// ResponderFunc is a method that implements the Responder interface.
type ResponderFunc func(*http.Response) error
// Respond implements the Responder interface on ResponderFunc.
func (rf ResponderFunc) Respond(r *http.Response) error {
return rf(r)
}
// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
// the http.Response and pass it along or, first, pass the http.Response along then react.
type RespondDecorator func(Responder) Responder
// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
// the response body is fine to share whereas a decorator that reads the body into a passed struct
// is not.
//
// To prevent memory leaks, ensure that at least one Responder closes the response body.
func CreateResponder(decorators ...RespondDecorator) Responder {
return DecorateResponder(
Responder(ResponderFunc(func(r *http.Response) error { return nil })),
decorators...)
}
// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
// applies to the Responder. Decorators are applied in the order received, but their effect upon the
// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
// along) or a post-decorator (pass the http.Response along and then react).
func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
for _, decorate := range decorators {
r = decorate(r)
}
return r
}
// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
// It creates a Responder from the decorators, which it then applies to the passed http.Response.
func Respond(r *http.Response, decorators ...RespondDecorator) error {
if r == nil {
return nil
}
return CreateResponder(decorators...).Respond(r)
}
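// exampleDrainResponse is an illustrative sketch, not part of the original
// library: draining before closing lets the underlying HTTP connection be
// reused for subsequent requests.
func exampleDrainResponse(resp *http.Response) error {
	return Respond(resp,
		ByDiscardingBody(),
		ByClosing())
}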
// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
// to the next RespondDecorator.
func ByIgnoring() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
return r.Respond(resp)
})
}
}
// ByCopying returns a RespondDecorator that copies the contents of the http.Response Body into
// the passed bytes.Buffer as the Body is read.
func ByCopying(b *bytes.Buffer) RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && resp != nil && resp.Body != nil {
resp.Body = TeeReadCloser(resp.Body, b)
}
return err
})
}
}
// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
// within the set.
func ByDiscardingBody() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && resp != nil && resp.Body != nil {
if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
return fmt.Errorf("Error discarding the response body: %v", err)
}
}
return err
})
}
}
// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
// closes the response body. Since the passed Responder is invoked prior to closing the response
// body, the decorator may occur anywhere within the set.
func ByClosing() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if resp != nil && resp.Body != nil {
if err := resp.Body.Close(); err != nil {
return fmt.Errorf("Error closing the response body: %v", err)
}
}
return err
})
}
}
// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
// it closes the response if the passed Responder returns an error and the response body exists.
func ByClosingIfError() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err != nil && resp != nil && resp.Body != nil {
if err := resp.Body.Close(); err != nil {
return fmt.Errorf("Error closing the response body: %v", err)
}
}
return err
})
}
}
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
// response Body into the value pointed to by v.
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
b, errInner := ioutil.ReadAll(resp.Body)
// Some responses might include a BOM; remove it so unmarshalling succeeds
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else if len(strings.Trim(string(b), " ")) > 0 {
errInner = json.Unmarshal(b, v)
if errInner != nil {
err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
}
}
}
return err
})
}
}
// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
// response Body into the value pointed to by v.
func ByUnmarshallingXML(v interface{}) RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
b, errInner := ioutil.ReadAll(resp.Body)
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else {
errInner = xml.Unmarshal(b, v)
if errInner != nil {
err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
}
}
}
return err
})
}
}
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
// StatusCode is among the set passed. On error, the response body is fully read into a buffer; the
// bytes are attached to the returned error and the body is restored so callers can still read it.
func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && !ResponseHasStatusCode(resp, codes...) {
derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
resp.Request.Method,
resp.Request.URL,
resp.Status)
if resp.Body != nil {
defer resp.Body.Close()
b, _ := ioutil.ReadAll(resp.Body)
derr.ServiceError = b
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
}
err = derr
}
return err
})
}
}
// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
// anything other than HTTP 200.
func WithErrorUnlessOK() RespondDecorator {
return WithErrorUnlessStatusCode(http.StatusOK)
}
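// exampleDecodeJSON is an illustrative sketch, not part of the original
// library: status checking, JSON decoding, and closing compose into a single
// Respond call; ByClosing is listed last so its close runs after the body has
// been read by the earlier decorators.
func exampleDecodeJSON(resp *http.Response, v interface{}) error {
	return Respond(resp,
		WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		ByUnmarshallingJSON(v),
		ByClosing())
}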
// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
// empty string slice if the passed http.Response is nil or the header does not exist.
func ExtractHeader(header string, resp *http.Response) []string {
if resp != nil && resp.Header != nil {
return resp.Header[http.CanonicalHeaderKey(header)]
}
return nil
}
// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
// returns an empty string if the passed http.Response is nil or the header does not exist.
func ExtractHeaderValue(header string, resp *http.Response) string {
h := ExtractHeader(header, resp)
if len(h) > 0 {
return h[0]
}
return ""
}

View File

@ -1,270 +0,0 @@
package autorest
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"time"
)
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
type Sender interface {
Do(*http.Request) (*http.Response, error)
}
// SenderFunc is a method that implements the Sender interface.
type SenderFunc func(*http.Request) (*http.Response, error)
// Do implements the Sender interface on SenderFunc.
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
return DecorateSender(&http.Client{}, decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
// the Sender. Decorators are applied in the order received, but their effect upon the request
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
// post-decorator (pass the http.Request along and react to the results in http.Response).
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
for _, decorate := range decorators {
s = decorate(s)
}
return s
}
// Send sends, by means of the default http.Client, the passed http.Request, returning the
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
// it will apply to the http.Client before invoking the Do method.
//
// Send is a convenience method and not recommended for production. Advanced users should use
// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
return SendWithSender(&http.Client{}, r, decorators...)
}
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
// it will apply to the http.Client before invoking the Do method.
//
// SendWithSender will not poll or retry requests.
func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
return DecorateSender(s, decorators...).Do(r)
}
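// exampleSend is an illustrative sketch, not part of the original library:
// any shared *http.Client satisfies Sender, and SendDecorators wrap it on a
// per-call basis.
func exampleSend(client *http.Client, req *http.Request) (*http.Response, error) {
	return SendWithSender(client, req,
		DoErrorUnlessStatusCode(http.StatusOK))
}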
// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
// invoking the Sender. The delay may be terminated by closing the optional channel on the
// http.Request. If canceled, no further Senders are invoked.
func AfterDelay(d time.Duration) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if !DelayForBackoff(d, 0, r.Cancel) {
return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
}
return s.Do(r)
})
}
}
// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
func AsIs() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return s.Do(r)
})
}
}
// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
// it closes the response if the passed Sender returns an error and the response body exists.
func DoCloseIfError() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
resp, err := s.Do(r)
if err != nil {
Respond(resp, ByDiscardingBody(), ByClosing())
}
return resp, err
})
}
}
// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
// among the set passed. Since these are artificial errors, the response body may still require
// closing.
func DoErrorIfStatusCode(codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
resp, err := s.Do(r)
if err == nil && ResponseHasStatusCode(resp, codes...) {
err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
resp.Request.Method,
resp.Request.URL,
resp.Status)
}
return resp, err
})
}
}
// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
// StatusCode is among the set passed. Since these are artificial errors, the response body
// may still require closing.
func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
resp, err := s.Do(r)
if err == nil && !ResponseHasStatusCode(resp, codes...) {
err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
resp.Request.Method,
resp.Request.URL,
resp.Status)
}
return resp, err
})
}
}
// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
// passed status codes. It expects the http.Response to contain a Location header providing the
// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
// the supplied duration. It will delay between requests for the duration specified in the
// Retry-After header or, if the header is absent, the passed delay. Polling may be canceled by
// closing the optional channel on the http.Request.
func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
resp, err = s.Do(r)
if err == nil && ResponseHasStatusCode(resp, codes...) {
r, err = NewPollingRequest(resp, r.Cancel)
for err == nil && ResponseHasStatusCode(resp, codes...) {
Respond(resp,
ByDiscardingBody(),
ByClosing())
resp, err = SendWithSender(s, r,
AfterDelay(GetRetryAfter(resp, delay)))
}
}
return resp, err
})
}
}
// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
// the http.Request.
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
for attempt := 0; attempt < attempts; attempt++ {
resp, err = s.Do(r)
if err == nil {
return resp, err
}
DelayForBackoff(backoff, attempt, r.Cancel)
}
return resp, err
})
}
}
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
// the http.Request.
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
b := []byte{}
if r.Body != nil {
b, err = ioutil.ReadAll(r.Body)
if err != nil {
return resp, err
}
}
// Increment to add the first call (attempts denotes number of retries)
attempts++
for attempt := 0; attempt < attempts; attempt++ {
r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
resp, err = s.Do(r)
if err != nil || !ResponseHasStatusCode(resp, codes...) {
return resp, err
}
DelayForBackoff(backoff, attempt, r.Cancel)
}
return resp, err
})
}
}
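// exampleRetry is an illustrative sketch, not part of the original library:
// retry up to three times on throttling and transient server errors, doubling
// the one-second backoff between attempts.
func exampleRetry(client *http.Client, req *http.Request) (*http.Response, error) {
	return SendWithSender(client, req,
		DoRetryForStatusCodes(3, time.Second,
			http.StatusTooManyRequests,
			http.StatusInternalServerError,
			http.StatusServiceUnavailable))
}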
// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
// to or greater than the specified duration, exponentially backing off between requests using the
// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
// optional channel on the http.Request.
func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
end := time.Now().Add(d)
for attempt := 0; time.Now().Before(end); attempt++ {
resp, err = s.Do(r)
if err == nil {
return resp, err
}
DelayForBackoff(backoff, attempt, r.Cancel)
}
return resp, err
})
}
}
// WithLogging returns a SendDecorator that implements simple before and after logging of the
// request.
func WithLogging(logger *log.Logger) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
logger.Printf("Sending %s %s", r.Method, r.URL)
resp, err := s.Do(r)
if err != nil {
logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
} else {
logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
}
return resp, err
})
}
}
// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
// the passed attempt (i.e., an exponential backoff delay). The backoff duration is in seconds and
// can be set to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
// returns false.
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
// count.
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
select {
case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
return true
case <-cancel:
return false
}
}
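// Illustrative arithmetic, not part of the original library: with a 2s
// backoff, attempts 0, 1, 2 and 3 sleep for 2s, 4s, 8s and 16s respectively
// (backoff * 2^attempt), since the attempt count is zero-based.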

View File

@ -1,178 +0,0 @@
package autorest
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/url"
"reflect"
"sort"
"strings"
)
// EncodedAs is a series of constants specifying various data encodings
type EncodedAs string
const (
// EncodedAsJSON states that data is encoded as JSON
EncodedAsJSON EncodedAs = "JSON"
// EncodedAsXML states that data is encoded as XML
EncodedAsXML EncodedAs = "XML"
)
// Decoder defines the decoding method json.Decoder and xml.Decoder share
type Decoder interface {
Decode(v interface{}) error
}
// NewDecoder creates a new decoder appropriate to the passed encoding.
// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
// encoded data.
func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
if encodedAs == EncodedAsJSON {
return json.NewDecoder(r)
} else if encodedAs == EncodedAsXML {
return xml.NewDecoder(r)
}
return nil
}
// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
// is especially useful if there is a chance the data will fail to decode.
// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
// is the decoding destination.
func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
b := bytes.Buffer{}
return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
}
// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
// Further, when it is closed, it ensures that rc is closed as well.
func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
return &teeReadCloser{rc, io.TeeReader(rc, w)}
}
type teeReadCloser struct {
rc io.ReadCloser
r io.Reader
}
func (t *teeReadCloser) Read(p []byte) (int, error) {
return t.r.Read(p)
}
func (t *teeReadCloser) Close() error {
return t.rc.Close()
}
func containsInt(ints []int, n int) bool {
for _, i := range ints {
if i == n {
return true
}
}
return false
}
func escapeValueStrings(m map[string]string) map[string]string {
for key, value := range m {
m[key] = url.QueryEscape(value)
}
return m
}
func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
mapOfStrings := make(map[string]string)
for key, value := range mapOfInterface {
mapOfStrings[key] = ensureValueString(value)
}
return mapOfStrings
}
func ensureValueString(value interface{}) string {
if value == nil {
return ""
}
switch v := value.(type) {
case string:
return v
case []byte:
return string(v)
default:
return fmt.Sprintf("%v", v)
}
}
// MapToValues converts the passed map[string]interface{} to url.Values.
func MapToValues(m map[string]interface{}) url.Values {
v := url.Values{}
for key, value := range m {
x := reflect.ValueOf(value)
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
for i := 0; i < x.Len(); i++ {
v.Add(key, ensureValueString(x.Index(i)))
}
} else {
v.Add(key, ensureValueString(value))
}
}
return v
}
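// exampleMapToValues is an illustrative sketch, not part of the original
// library: slice values expand into repeated keys, and url.Values.Encode
// sorts the keys, so this returns "limit=10&tag=a&tag=b".
func exampleMapToValues() string {
	v := MapToValues(map[string]interface{}{
		"tag":   []string{"a", "b"},
		"limit": 10,
	})
	return v.Encode()
}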
// String converts the value v to a string. If v is a list of strings, it joins the
// elements using the supplied separator.
func String(v interface{}, sep ...string) string {
if len(sep) > 0 {
return ensureValueString(strings.Join(v.([]string), sep[0]))
}
return ensureValueString(v)
}
// Encode escapes the passed value for use as a URL path segment or query parameter,
// according to the supplied location.
func Encode(location string, v interface{}, sep ...string) string {
s := String(v, sep...)
switch strings.ToLower(location) {
case "path":
return pathEscape(s)
case "query":
return queryEscape(s)
default:
return s
}
}
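// exampleEscaping is an illustrative sketch, not part of the original
// library: path escaping renders a space as %20 while query escaping uses +.
func exampleEscaping() (string, string) {
	p := Encode("path", "a b")  // "a%20b"
	q := Encode("query", "a b") // "a+b"
	return p, q
}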
func pathEscape(s string) string {
return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
}
func queryEscape(s string) string {
return url.QueryEscape(s)
}
// createQuery is the same as the Encode method of url.Values in the "net/url" package,
// except it does not escape the query parameter values, because they already
// arrive encoded. It formats the values map in query format (bar=foo&a=b).
func createQuery(v url.Values) string {
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := url.QueryEscape(k) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(v)
}
}
return buf.String()
}

View File

@ -1,23 +0,0 @@
package autorest
import (
"fmt"
)
const (
major = "7"
minor = "3"
patch = "0"
tag = ""
semVerFormat = "%s.%s.%s%s"
)
var version string
// Version returns the semantic version (see http://semver.org).
func Version() string {
if version == "" {
version = fmt.Sprintf(semVerFormat, major, minor, patch, tag)
}
return version
}

28
vendor/vendor.json vendored
View File

@ -21,28 +21,16 @@
"revisionTime": "2016-08-11T21:22:31Z"
},
{
"checksumSHA1": "24EW3PGLGVSy/5joAOZdHzN/S2E=",
"path": "github.com/Azure/azure-storage-go",
"revision": "12ccaadb081cdd217702067d28da9a7ff4305239",
"revisionTime": "2017-03-02T22:15:08Z"
"checksumSHA1": "z+M6FYl9EKsoZZMLcT0Ktwfk8pI=",
"path": "github.com/Azure/azure-pipeline-go/pipeline",
"revision": "7571e8eb0876932ab505918ff7ed5107773e5ee2",
"revisionTime": "2018-06-07T21:19:23Z"
},
{
"checksumSHA1": "+sxS3YEvxf1VxlRskIdXFRWq9rY=",
"path": "github.com/Azure/go-autorest/autorest",
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
"revisionTime": "2017-02-15T19:58:14Z"
},
{
"checksumSHA1": "ghrnc4vZv6q8zzeakZnrS8CGFhE=",
"path": "github.com/Azure/go-autorest/autorest/azure",
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
"revisionTime": "2017-02-15T19:58:14Z"
},
{
"checksumSHA1": "q9Qz8PAxK5FTOZwgYKe5Lj38u4c=",
"path": "github.com/Azure/go-autorest/autorest/date",
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
"revisionTime": "2017-02-15T19:58:14Z"
"checksumSHA1": "5nsGu77r69lloEWbFhMof2UA9rY=",
"path": "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob",
"revision": "eaae161d9d5e07363f04ddb19d84d57efc66d1a1",
"revisionTime": "2018-07-12T00:56:34Z"
},
{
"checksumSHA1": "QC55lHNOv1+UAL2xtIHw17MJ8J8=",