diff --git a/go.mod b/go.mod
index 24d328ff0d..94cba72db7 100644
--- a/go.mod
+++ b/go.mod
@@ -5,8 +5,8 @@ go 1.16
require (
cloud.google.com/go/bigtable v1.3.0
cloud.google.com/go/storage v1.10.0
- github.com/Azure/azure-pipeline-go v0.2.2
- github.com/Azure/azure-storage-blob-go v0.8.0
+ github.com/Azure/azure-pipeline-go v0.2.3
+ github.com/Azure/azure-storage-blob-go v0.13.0
github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5
github.com/NYTimes/gziphandler v1.1.1
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
@@ -50,7 +50,7 @@ require (
github.com/sony/gobreaker v0.4.1
github.com/spf13/afero v1.2.2
github.com/stretchr/testify v1.7.0
- github.com/thanos-io/thanos v0.22.0
+ github.com/thanos-io/thanos v0.19.1-0.20210827151736-fdfc0776d0c3
github.com/uber/jaeger-client-go v2.29.1+incompatible
github.com/weaveworks/common v0.0.0-20210722103813-e649eff5ab4a
go.etcd.io/bbolt v1.3.6
diff --git a/go.sum b/go.sum
index 2ee420f83e..0d108cd1e4 100644
--- a/go.sum
+++ b/go.sum
@@ -54,8 +54,9 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -71,8 +72,9 @@ github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
github.com/Azure/azure-sdk-for-go v54.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v55.2.0+incompatible h1:TL2/vJWJEPOrmv97nHcbvjXES0Ntlb9P95hqGA1J2dU=
github.com/Azure/azure-sdk-for-go v55.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
+github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc=
+github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -265,6 +267,8 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw
github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM=
github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/baidubce/bce-sdk-go v0.9.81 h1:n8KfThLG9fvGv3A+RtTt/jKhg/FPPRpo+iNnS2r+iPI=
+github.com/baidubce/bce-sdk-go v0.9.81/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
@@ -552,6 +556,8 @@ github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7j
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/efficientgo/e2e v0.9.0/go.mod h1:sSNnkQ1E5emVqwUBBMhktSCG19u06De0v762CXcaE18=
+github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M=
github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 h1:kM/ALyvAnTrwSB+nlKqoKaDnZbInp1YImZvW+gtHwc8=
github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q=
github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY=
@@ -1237,8 +1243,9 @@ github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+v
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs=
github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@@ -1715,8 +1722,8 @@ github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDG
github.com/thanos-io/thanos v0.13.1-0.20210224074000-659446cab117/go.mod h1:kdqFpzdkveIKpNNECVJd75RPvgsAifQgJymwCdfev1w=
github.com/thanos-io/thanos v0.13.1-0.20210226164558-03dace0a1aa1/go.mod h1:gMCy4oCteKTT7VuXVvXLTPGzzjovX1VPE5p+HgL1hyU=
github.com/thanos-io/thanos v0.13.1-0.20210401085038-d7dff0c84d17/go.mod h1:zU8KqE+6A+HksK4wiep8e/3UvCZLm+Wrw9AqZGaAm9k=
-github.com/thanos-io/thanos v0.22.0 h1:bHTzC0ZaP5rBJ2pOeJ73+hO/p3U4tSshJoR3pumM6sA=
-github.com/thanos-io/thanos v0.22.0/go.mod h1:SZDWz3phcUcBr4MYFoPFRvl+Z9Nbi45HlwQlwSZSt+Q=
+github.com/thanos-io/thanos v0.19.1-0.20210827151736-fdfc0776d0c3 h1:ffbPKImogZ6/f0dVoaSDVBcIaOt0GbFn36TJg6cV+u0=
+github.com/thanos-io/thanos v0.19.1-0.20210827151736-fdfc0776d0c3/go.mod h1:cnuwdxn3JYTj2XBJc+Odn3iwkA2HrWuA/goEKkCCack=
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU=
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1899,6 +1906,7 @@ go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU=
+go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
@@ -2189,6 +2197,7 @@ golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/pkg/chunk/azure/blob_storage_client.go b/pkg/chunk/azure/blob_storage_client.go
index 88e11ff6dc..be0dd6b2e1 100644
--- a/pkg/chunk/azure/blob_storage_client.go
+++ b/pkg/chunk/azure/blob_storage_client.go
@@ -135,7 +135,7 @@ func (b *BlobStorage) getObject(ctx context.Context, objectKey string) (rc io.Re
}
// Request access to the blob
- downloadResponse, err := blockBlobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
+ downloadResponse, err := blockBlobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if err != nil {
if isObjNotFoundErr(err) {
return nil, chunk.ErrStorageObjectNotFound
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index 139c72f4c4..601daf99fd 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -4,6 +4,7 @@ import (
"context"
"flag"
"fmt"
+ dto "github.com/prometheus/client_model/go"
"hash/fnv"
"io/ioutil"
"math/rand"
@@ -45,7 +46,7 @@ var (
errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s"
RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil)
- DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper {
+ DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter, blocksMarkedForNoCompact prometheus.Counter) compact.Grouper {
return compact.NewDefaultGrouper(
logger,
bkt,
@@ -54,6 +55,7 @@ var (
reg,
blocksMarkedForDeletion,
garbageCollectedBlocks,
+ blocksMarkedForNoCompact,
metadata.NoneFunc)
}
@@ -77,6 +79,7 @@ type BlocksGrouperFactory func(
reg prometheus.Registerer,
blocksMarkedForDeletion prometheus.Counter,
garbageCollectedBlocks prometheus.Counter,
+ blocksMarkedForNoCompact prometheus.Counter,
) compact.Grouper
// BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks.
@@ -218,11 +221,22 @@ type Compactor struct {
compactionRunInterval prometheus.Gauge
blocksMarkedForDeletion prometheus.Counter
garbageCollectedBlocks prometheus.Counter
+ blocksMarkedForNoCompact prometheus.Counter
// TSDB syncer metrics
syncerMetrics *syncerMetrics
}
+type noOpsCounter struct {
+}
+
+func (noOpsCounter) Inc() {}
+func (noOpsCounter) Add(float64) {}
+func (noOpsCounter) Write(*dto.Metric) error { return nil }
+func (noOpsCounter) Desc() *prometheus.Desc { return nil }
+func (noOpsCounter) Describe(chan<- *prometheus.Desc) {}
+func (noOpsCounter) Collect(chan<- prometheus.Metric) {}
+
// NewCompactor makes a new Compactor.
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, cfgProvider ConfigProvider, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) {
bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) {
@@ -315,6 +329,7 @@ func newCompactor(
Name: "cortex_compactor_garbage_collected_blocks_total",
Help: "Total number of blocks marked for deletion by compactor.",
}),
+ blocksMarkedForNoCompact: noOpsCounter{},
}
if len(compactorCfg.EnabledTenants) > 0 {
@@ -650,12 +665,13 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error {
compactor, err := compact.NewBucketCompactor(
ulogger,
syncer,
- c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.garbageCollectedBlocks),
+ c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.garbageCollectedBlocks, c.blocksMarkedForNoCompact),
c.blocksPlanner,
c.blocksCompactor,
path.Join(c.compactorCfg.DataDir, "compact"),
bucket,
c.compactorCfg.CompactionConcurrency,
+ false,
)
if err != nil {
return errors.Wrap(err, "failed to create bucket compactor")
diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go
index 75523eac36..1d50590fab 100644
--- a/pkg/compactor/compactor_test.go
+++ b/pkg/compactor/compactor_test.go
@@ -265,6 +265,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) {
"cortex_compactor_block_cleanup_started_total",
"cortex_compactor_block_cleanup_completed_total",
"cortex_compactor_block_cleanup_failed_total",
+ "cortex_compactor_blocks_marked_for_no_compact_total",
))
}
@@ -411,6 +412,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket
"cortex_compactor_block_cleanup_started_total",
"cortex_compactor_block_cleanup_completed_total",
"cortex_compactor_block_cleanup_failed_total",
+ "cortex_compactor_blocks_marked_for_no_compact_total",
))
}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
index 4aaf066501..5d3d4339e4 100644
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
@@ -49,6 +49,9 @@ func (e *ErrorNode) Error(msg string) string {
// Cause returns the error that preceded this error.
func (e *ErrorNode) Cause() error { return e.cause }
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (e *ErrorNode) Unwrap() error { return e.cause }
+
// Temporary returns true if the error occurred due to a temporary condition.
func (e ErrorNode) Temporary() bool {
type temporary interface {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go
deleted file mode 100644
index 9e18a79436..0000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package azblob
-
-import "sync/atomic"
-
-// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
-// The AtomicMorpher callback is passed a startValue and based on this value it returns
-// what the new value should be and the result that AtomicMorph should return to its caller.
-type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
-
-const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
-
-// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
-func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
- for {
- currentVal := atomic.LoadInt32(target)
- desiredVal, morphResult := morpher(currentVal)
- if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
- return morphResult
- }
- }
-}
-
-// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
-// The AtomicMorpher callback is passed a startValue and based on this value it returns
-// what the new value should be and the result that AtomicMorph should return to its caller.
-type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
-
-// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
-func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
- for {
- currentVal := atomic.LoadUint32(target)
- desiredVal, morphResult := morpher(currentVal)
- if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
- return morphResult
- }
- }
-}
-
-// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
-// The AtomicMorpher callback is passed a startValue and based on this value it returns
-// what the new value should be and the result that AtomicMorph should return to its caller.
-type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
-
-// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
-func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
- for {
- currentVal := atomic.LoadInt64(target)
- desiredVal, morphResult := morpher(currentVal)
- if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
- return morphResult
- }
- }
-}
-
-// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
-// The AtomicMorpher callback is passed a startValue and based on this value it returns
-// what the new value should be and the result that AtomicMorph should return to its caller.
-type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
-
-// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
-func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
- for {
- currentVal := atomic.LoadUint64(target)
- desiredVal, morphResult := morpher(currentVal)
- if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) {
- return morphResult
- }
- }
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
index af0944348b..7d5a13b3bc 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
@@ -3,6 +3,7 @@ package azblob
import (
"context"
"encoding/base64"
+ "fmt"
"io"
"net/http"
@@ -55,24 +56,32 @@ type UploadToBlockBlobOptions struct {
// AccessConditions indicates the access conditions for the block blob.
AccessConditions BlobAccessConditions
+ // BlobAccessTier indicates the tier of blob
+ BlobAccessTier AccessTierType
+
+ // BlobTagsMap
+ BlobTagsMap BlobTagsMap
+
+ // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+ ClientProvidedKeyOptions ClientProvidedKeyOptions
+
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
Parallelism uint16
}
-// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
-func UploadBufferToBlockBlob(ctx context.Context, b []byte,
+// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
+func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64,
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
- bufferSize := int64(len(b))
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
- if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
+ if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
return nil, errors.New("buffer is too large to upload to a block blob")
}
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
- if bufferSize <= BlockBlobMaxUploadBlobBytes {
+ if readerSize <= BlockBlobMaxUploadBlobBytes {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
} else {
- o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
+ o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = BlobDefaultDownloadBlockSize
}
@@ -80,31 +89,31 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
}
}
- if bufferSize <= BlockBlobMaxUploadBlobBytes {
+ if readerSize <= BlockBlobMaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
- var body io.ReadSeeker = bytes.NewReader(b)
+ var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
if o.Progress != nil {
body = pipeline.NewRequestBodyProgress(body, o.Progress)
}
- return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
+ return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions)
}
- var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
+ var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
progress := int64(0)
progressLock := &sync.Mutex{}
err := DoBatchTransfer(ctx, BatchTransferOptions{
- OperationName: "UploadBufferToBlockBlob",
- TransferSize: bufferSize,
+ OperationName: "uploadReaderAtToBlockBlob",
+ TransferSize: readerSize,
ChunkSize: o.BlockSize,
Parallelism: o.Parallelism,
Operation: func(offset int64, count int64, ctx context.Context) error {
// This function is called once per block.
// It is passed this block's offset within the buffer and its count of bytes
// Prepare to read the proper block/section of the buffer
- var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
+ var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
blockNum := offset / o.BlockSize
if o.Progress != nil {
blockProgress := int64(0)
@@ -122,7 +131,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
- _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
+ _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions)
return err
},
})
@@ -130,7 +139,13 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
return nil, err
}
// All put blocks were successful, call Put Block List to finalize the blob
- return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
+ return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions)
+}
+
+// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
+func UploadBufferToBlockBlob(ctx context.Context, b []byte,
+ blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+ return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o)
}
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
@@ -141,15 +156,7 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File,
if err != nil {
return nil, err
}
- m := mmf{} // Default to an empty slice; used for 0-size file
- if stat.Size() != 0 {
- m, err = newMMF(file, false, 0, int(stat.Size()))
- if err != nil {
- return nil, err
- }
- defer m.unmap()
- }
- return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
+ return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o)
}
///////////////////////////////////////////////////////////////////////////////
@@ -167,6 +174,9 @@ type DownloadFromBlobOptions struct {
// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
AccessConditions BlobAccessConditions
+ // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+ ClientProvidedKeyOptions ClientProvidedKeyOptions
+
// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
Parallelism uint16
@@ -174,9 +184,9 @@ type DownloadFromBlobOptions struct {
RetryReaderOptionsPerBlock RetryReaderOptions
}
-// downloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
-func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
- b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
+// downloadBlobToWriterAt downloads an Azure blob to a buffer with parallel.
+func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+ writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}
@@ -186,7 +196,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
} else {
// If we don't have the length at all, get it
- dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
+ dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions)
if err != nil {
return err
}
@@ -194,17 +204,22 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
}
}
+ if count <= 0 {
+ // The file is empty, there is nothing to download.
+ return nil
+ }
+
// Prepare and do parallel download.
progress := int64(0)
progressLock := &sync.Mutex{}
err := DoBatchTransfer(ctx, BatchTransferOptions{
- OperationName: "downloadBlobToBuffer",
+ OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
Parallelism: o.Parallelism,
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
- dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
+ dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions)
if err != nil {
return err
}
@@ -222,7 +237,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
progressLock.Unlock()
})
}
- _, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
+ _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
body.Close()
return err
},
@@ -237,7 +252,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
b []byte, o DownloadFromBlobOptions) error {
- return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
+ return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil)
}
// DownloadBlobToFile downloads an Azure blob to a local file.
@@ -250,7 +265,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
if count == CountToEnd {
// Try to get Azure blob's size
- props, err := blobURL.GetProperties(ctx, o.AccessConditions)
+ props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions)
if err != nil {
return err
}
@@ -271,13 +286,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
}
if size > 0 {
- // 3. Set mmap and call downloadBlobToBuffer.
- m, err := newMMF(file, true, 0, int(size))
- if err != nil {
- return err
- }
- defer m.unmap()
- return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
+ return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil)
} else { // if the blob's size is 0, there is no need in downloading it
return nil
}
@@ -301,6 +310,10 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
return errors.New("ChunkSize cannot be 0")
}
+ if o.Parallelism == 0 {
+ o.Parallelism = 5 // default Parallelism
+ }
+
// Prepare and do parallel operations.
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently
@@ -309,9 +322,6 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
defer cancel()
// Create the goroutines that process each operation (in parallel).
- if o.Parallelism == 0 {
- o.Parallelism = 5 // default Parallelism
- }
for g := uint16(0); g < o.Parallelism; g++ {
//grIndex := g
go func() {
@@ -352,192 +362,205 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
////////////////////////////////////////////////////////////////////////////////////////////////
-type UploadStreamToBlockBlobOptions struct {
- BufferSize int
- MaxBuffers int
- BlobHTTPHeaders BlobHTTPHeaders
- Metadata Metadata
- AccessConditions BlobAccessConditions
+// TransferManager provides a buffer and thread pool manager for certain transfer options.
+// It is undefined behavior if code outside of this package calls any of these methods.
+type TransferManager interface {
+ // Get provides a buffer that will be used to read data into and write out to the stream.
+ // It is guaranteed by this package to not read or write beyond the size of the slice.
+ Get() []byte
+ // Put may or may not put the buffer into underlying storage, depending on settings.
+ // The buffer must not be touched after this has been called.
+ Put(b []byte)
+ // Run will use a goroutine pool entry to run a function. This blocks until a pool
+ // goroutine becomes available.
+ Run(func())
+ // Close shuts down all internal goroutines. This must be called when the TransferManager
+ // will no longer be used. Not closing it will cause a goroutine leak.
+ Close()
}
-func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
- o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
- result, err := uploadStream(ctx, reader,
- UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
- &uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
- if err != nil {
- return nil, err
+type staticBuffer struct {
+ buffers chan []byte
+ size int
+ threadpool chan func()
+}
+
+// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
+// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
+// can be shared between calls if you wish to control maximum memory and concurrency with
+// multiple concurrent calls.
+func NewStaticBuffer(size, max int) (TransferManager, error) {
+ if size < 1 || max < 1 {
+ return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+ }
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
+ }
+
+ threadpool := make(chan func(), max)
+ buffers := make(chan []byte, max)
+ for i := 0; i < max; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+
+ buffers <- make([]byte, size)
}
- return result.(CommonResponse), nil
+ return staticBuffer{
+ buffers: buffers,
+ size: size,
+ threadpool: threadpool,
+ }, nil
}
-type uploadStreamToBlockBlobOptions struct {
- b BlockBlobURL
- o UploadStreamToBlockBlobOptions
- blockIDPrefix uuid // UUID used with all blockIDs
- maxBlockNum uint32 // defaults to 0
- firstBlock []byte // Used only if maxBlockNum is 0
+// Get implements TransferManager.Get().
+func (s staticBuffer) Get() []byte {
+ return <-s.buffers
}
-func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
- return nil, nil
+// Put implements TransferManager.Put().
+func (s staticBuffer) Put(b []byte) {
+ select {
+ case s.buffers <- b:
+ default: // This shouldn't happen, but just in case they call Put() with their own buffer.
+ }
}
-func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
- if num == 0 {
- t.firstBlock = buffer
+// Run implements TransferManager.Run().
+func (s staticBuffer) Run(f func()) {
+ s.threadpool <- f
+}
- // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
- // If the payload is exactly the same size as the buffer, there may be more content coming in.
- if len(buffer) < t.o.BufferSize {
- return nil
- }
- }
- // Else, upload a staged block...
- atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
- // Atomically remember (in t.numBlocks) the maximum block num we've ever seen
- if startVal < num {
- return num, nil
- }
- return startVal, nil
- })
- blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
- _, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
- return err
+// Close implements TransferManager.Close().
+func (s staticBuffer) Close() {
+ close(s.threadpool)
+ close(s.buffers)
}
-func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
- // If the first block had the exact same size as the buffer
- // we would have staged it as a block thinking that there might be more data coming
- if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
- // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
- return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
- t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
+type syncPool struct {
+ threadpool chan func()
+ pool sync.Pool
+}
+
+// NewSyncPool creates a TransferManager that will use a sync.Pool
+// that can hold a non-capped number of buffers constrained by concurrency. This
+// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) {
+ if size < 1 || concurrency < 1 {
+ return nil, fmt.Errorf("cannot be called with size or max set to < 1")
}
- // Multiple blocks staged, commit them all now
- blockID := newUuidBlockID(t.blockIDPrefix)
- blockIDs := make([]string, t.maxBlockNum+1)
- for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
- blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
}
- return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
-}
-////////////////////////////////////////////////////////////////////////////////////////////////////
+ threadpool := make(chan func(), concurrency)
+ for i := 0; i < concurrency; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+ }
-type iTransfer interface {
- start(ctx context.Context) (interface{}, error)
- chunk(ctx context.Context, num uint32, buffer []byte) error
- end(ctx context.Context) (interface{}, error)
+ return &syncPool{
+ threadpool: threadpool,
+ pool: sync.Pool{
+ New: func() interface{} {
+ return make([]byte, size)
+ },
+ },
+ }, nil
}
-type UploadStreamOptions struct {
- MaxBuffers int
- BufferSize int
+// Get implements TransferManager.Get().
+func (s *syncPool) Get() []byte {
+ return s.pool.Get().([]byte)
}
-type firstErr struct {
- lock sync.Mutex
- finalError error
+// Put implements TransferManager.Put().
+func (s *syncPool) Put(b []byte) {
+ s.pool.Put(b)
}
-func (fe *firstErr) set(err error) {
- fe.lock.Lock()
- if fe.finalError == nil {
- fe.finalError = err
- }
- fe.lock.Unlock()
+// Run implements TransferManager.Run().
+func (s *syncPool) Run(f func()) {
+ s.threadpool <- f
}
-func (fe *firstErr) get() (err error) {
- fe.lock.Lock()
- err = fe.finalError
- fe.lock.Unlock()
- return
+// Close implements TransferManager.Close().
+func (s *syncPool) Close() {
+ close(s.threadpool)
}
-func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
- firstErr := firstErr{}
- ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
- defer cancel()
- wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
- type OutgoingMsg struct {
- chunkNum uint32
- buffer []byte
+const _1MiB = 1024 * 1024
+
+// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob.
+type UploadStreamToBlockBlobOptions struct {
+ // TransferManager provides a TransferManager that controls buffer allocation/reuse and
+ // concurrency. This overrides BufferSize and MaxBuffers if set.
+ TransferManager TransferManager
+ transferMangerNotSet bool
+ // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
+ BufferSize int
+ // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file.
+ MaxBuffers int
+ BlobHTTPHeaders BlobHTTPHeaders
+ Metadata Metadata
+ AccessConditions BlobAccessConditions
+ BlobAccessTier AccessTierType
+ BlobTagsMap BlobTagsMap
+ ClientProvidedKeyOptions ClientProvidedKeyOptions
+}
+
+func (u *UploadStreamToBlockBlobOptions) defaults() error {
+ if u.TransferManager != nil {
+ return nil
}
- // Create a channel to hold the buffers usable for incoming datsa
- incoming := make(chan []byte, o.MaxBuffers)
- outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
- if result, err := t.start(ctx); err != nil {
- return result, err
+ if u.MaxBuffers == 0 {
+ u.MaxBuffers = 1
}
- numBuffers := 0 // The number of buffers & out going goroutines created so far
- injectBuffer := func() {
- // For each Buffer, create it and a goroutine to upload it
- incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can from the reader into it
- numBuffers++
- go func() {
- for outgoingMsg := range outgoing {
- // Upload the outgoing buffer
- err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
- wg.Done() // Indicate this buffer was sent
- if nil != err {
- // NOTE: finalErr could be assigned to multiple times here which is OK,
- // some error will be returned.
- firstErr.set(err)
- cancel()
- }
- incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
- }
- }()
+ if u.BufferSize < _1MiB {
+ u.BufferSize = _1MiB
}
- injectBuffer() // Create our 1st buffer & outgoing goroutine
-
- // This goroutine grabs a buffer, reads from the stream into the buffer,
- // and inserts the buffer into the outgoing channel to be uploaded
- for c := uint32(0); true; c++ { // Iterate once per chunk
- var buffer []byte
- if numBuffers < o.MaxBuffers {
- select {
- // We're not at max buffers, see if a previously-created buffer is available
- case buffer = <-incoming:
- break
- default:
- // No buffer available; inject a new buffer & go routine to process it
- injectBuffer()
- buffer = <-incoming // Grab the just-injected buffer
- }
- } else {
- // We are at max buffers, block until we get to reuse one
- buffer = <-incoming
- }
- n, err := io.ReadFull(reader, buffer)
- if err != nil { // Less than len(buffer) bytes were read
- buffer = buffer[:n] // Make slice match the # of read bytes
- }
- if len(buffer) > 0 {
- // Buffer not empty, upload it
- wg.Add(1) // We're posting a buffer to be sent
- outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
- }
- if err != nil { // The reader is done, no more outgoing buffers
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
- } else {
- firstErr.set(err)
- }
- break
- }
+
+ var err error
+ u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
+ if err != nil {
+ return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
}
- // NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
- close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
- wg.Wait() // Wait for all pending outgoing messages to complete
- err := firstErr.get()
- if err == nil {
- // If no error, after all blocks uploaded, commit them to the blob & return the result
- return t.end(ctx)
+ u.transferMangerNotSet = true
+ return nil
+}
+
+// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL.
+// A Context deadline or cancellation will cause this to error.
+func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
+ if err := o.defaults(); err != nil {
+ return nil, err
}
- return nil, err
+
+ // If we used the default manager, we need to close it.
+ if o.transferMangerNotSet {
+ defer o.TransferManager.Close()
+ }
+
+ result, err := copyFromReader(ctx, reader, blockBlobURL, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
+// TODO: Remove on next minor release in v0 or before v1.
+type UploadStreamOptions struct {
+ BufferSize int
+ MaxBuffers int
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
index 067939b1a4..93c71eb972 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
@@ -8,6 +8,7 @@ import (
const (
snapshot = "snapshot"
+ versionId = "versionid"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)
@@ -23,6 +24,7 @@ type BlobURLParts struct {
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
+ VersionID string // "" if not versioning enabled
}
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
@@ -85,12 +87,20 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
// Convert the query parameters to a case-sensitive map & trim whitespace
paramsMap := u.Query()
- up.Snapshot = "" // Assume no snapshot
+ up.Snapshot = "" // Assume no snapshot
+ up.VersionID = "" // Assume no versionID
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
up.Snapshot = snapshotStr[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, snapshot)
}
+
+ if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
+ up.VersionID = versionIDs[0]
+ // If we recognized the query parameter, remove it from the map
+ delete(paramsMap, versionId) // delete "versionid" from paramsMap
+ delete(paramsMap, "versionId") // delete "versionId" from paramsMap
+ }
up.SAS = newSASQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode()
return up
@@ -136,6 +146,15 @@ func (up BlobURLParts) URL() url.URL {
}
rawQuery += snapshot + "=" + up.Snapshot
}
+
+ // Concatenate blob version id query parameter (if it exists)
+ if up.VersionID != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += versionId + "=" + up.VersionID
+ }
+
sas := up.SAS.Encode()
if sas != "" {
if len(rawQuery) > 0 {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
index 4d45d3ec77..11b1830451 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -44,6 +44,14 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountC
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
+ } else if v.Version != "" {
+ resource = "bv"
+ //Make sure the permission characters are in the correct order
+ perms := &BlobSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
} else if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
@@ -155,7 +163,7 @@ func getCanonicalName(account string, containerName string, blobName string) str
// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type ContainerSASPermissions struct {
- Read, Add, Create, Write, Delete, List bool
+ Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
}
// String produces the SAS permissions string for an Azure Storage container.
@@ -177,9 +185,15 @@ func (p ContainerSASPermissions) String() string {
if p.Delete {
b.WriteRune('d')
}
+ if p.DeletePreviousVersion {
+ b.WriteRune('x')
+ }
if p.List {
b.WriteRune('l')
}
+ if p.Tag {
+ b.WriteRune('t')
+ }
return b.String()
}
@@ -198,10 +212,14 @@ func (p *ContainerSASPermissions) Parse(s string) error {
p.Write = true
case 'd':
p.Delete = true
+ case 'x':
+ p.DeletePreviousVersion = true
case 'l':
p.List = true
+ case 't':
+ p.Tag = true
default:
- return fmt.Errorf("Invalid permission: '%v'", r)
+ return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
@@ -209,7 +227,7 @@ func (p *ContainerSASPermissions) Parse(s string) error {
// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
-type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }
+type BlobSASPermissions struct{ Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag bool }
// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field.
@@ -230,6 +248,12 @@ func (p BlobSASPermissions) String() string {
if p.Delete {
b.WriteRune('d')
}
+ if p.DeletePreviousVersion {
+ b.WriteRune('x')
+ }
+ if p.Tag {
+ b.WriteRune('t')
+ }
return b.String()
}
@@ -248,8 +272,12 @@ func (p *BlobSASPermissions) Parse(s string) error {
p.Write = true
case 'd':
p.Delete = true
+ case 'x':
+ p.DeletePreviousVersion = true
+ case 't':
+ p.Tag = true
default:
- return fmt.Errorf("Invalid permission: '%v'", r)
+ return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
index d260f8aee5..292710cc34 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
@@ -61,8 +61,11 @@ const (
// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
- // ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
- ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
+ // ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob.
+ ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch"
+
+ // ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
+ ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
index b6bd6af112..363353a063 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
@@ -42,21 +42,40 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
+ p := NewBlobURLParts(ab.URL())
+ p.VersionID = versionId
+ return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
+func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return ab.blobClient.GetAccountInfo(ctx)
+}
+
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
+func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*AppendBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
- ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+ nil, // Blob ifTags
+ nil,
+ blobTagsString, // Blob tags
+ )
}
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
-func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
+func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
count, err := validateSeekableStreamAt0AndGetCount(body)
@@ -64,21 +83,32 @@ func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac
return nil, err
}
return ab.abClient.AppendBlock(ctx, body, count, nil,
- transactionalMD5, ac.LeaseAccessConditions.pointers(),
+ transactionalMD5,
+ nil, // CRC
+ ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
-func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
+func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockFromURLResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
- transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
+ transactionalMD5, nil, nil, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ destinationAccessConditions.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
type AppendBlobAccessConditions struct {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
index 41d13402c9..6f453e6698 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
@@ -2,9 +2,9 @@ package azblob
import (
"context"
- "net/url"
-
"github.com/Azure/azure-pipeline-go/pipeline"
+ "net/url"
+ "strings"
)
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
@@ -12,6 +12,11 @@ type BlobURL struct {
blobClient blobClient
}
+type BlobTagsMap map[string]string
+
+var DefaultAccessTier AccessTierType = AccessTierNone
+var DefaultPremiumBlobAccessTier PremiumPageBlobAccessTierType = PremiumPageBlobAccessTierNone
+
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
blobClient := newBlobClient(url, p)
@@ -29,6 +34,10 @@ func (b BlobURL) String() string {
return u.String()
}
+func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return b.blobClient.GetAccountInfo(ctx)
+}
+
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
return NewBlobURL(b.blobClient.URL(), p)
@@ -42,6 +51,14 @@ func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
}
+// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (b BlobURL) WithVersionID(versionID string) BlobURL {
+ p := NewBlobURLParts(b.URL())
+ p.VersionID = versionID
+ return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
@@ -57,19 +74,49 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
}
-// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
+ if blobTagsMap == nil {
+ return nil
+ }
+ tags := make([]string, 0)
+ for key, val := range blobTagsMap {
+ tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+ }
+ //tags = tags[:len(tags)-1]
+ blobTagsString := strings.Join(tags, "&")
+ return &blobTagsString
+}
+
+func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
+ if blobTagsMap == nil {
+ return BlobTags{}
+ }
+ blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
+ for key, val := range blobTagsMap {
+ blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
+ }
+ return BlobTags{BlobTagSet: blobTagSet}
+}
+
+// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
-func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
+func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool
if rangeGetContentMD5 {
xRangeGetContentMD5 = &rangeGetContentMD5
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- dr, err := b.blobClient.Download(ctx, nil, nil,
+ dr, err := b.blobClient.Download(ctx, nil, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
- ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
if err != nil {
return nil, err
}
@@ -81,13 +128,33 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
}, err
}
-// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
-// Note that deleting a blob also deletes all its snapshots.
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note 1: that deleting a blob also deletes all its snapshots.
+// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
+}
+
+// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+// Each call to this operation replaces all existing tags attached to the blob.
+// To remove all tags from the blob, call this operation with no tags set.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
+func (b BlobURL) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
+ tags := SerializeBlobTags(blobTagsMap)
+ return b.blobClient.SetTags(ctx, timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, &tags)
+}
+
+// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
+func (b BlobURL) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (*BlobTags, error) {
+ return b.blobClient.GetTags(ctx, timeout, requestID, snapshot, versionID, ifTags)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
@@ -96,50 +163,71 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
return b.blobClient.Undelete(ctx, nil, nil)
}
-// SetTier operation sets the tier on a blob. The operation is allowed on a page
-// blob in a premium storage account and on a block blob in a blob storage account (locally
-// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
-// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
-// does not update the blob's ETag.
+// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
+// and on a block blob in a blob storage account (locally redundant storage only).
+// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
+// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
+// Note: VersionId is an optional parameter which is part of request URL query params.
+// It can be explicitly set by calling WithVersionID(versionID string) function and hence it is not required to pass it here.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
- return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
+ return b.blobClient.SetTier(ctx, tier, nil,
+ nil, // Blob versioning
+ nil, RehydratePriorityNone, nil, lac.pointers())
}
-// GetBlobProperties returns the blob's properties.
+// GetProperties returns the blob's properties.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
-func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
+func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ return b.blobClient.GetProperties(ctx, nil,
+ nil, // Blob versioning
+ nil, ac.LeaseAccessConditions.pointers(),
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
-// SetBlobHTTPHeaders changes a blob's HTTP headers.
+// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
&h.ContentDisposition, nil)
}
-// SetBlobMetadata changes a blob's metadata.
+// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
-func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
+func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
-func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
+func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) {
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
// performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
+ return b.blobClient.CreateSnapshot(ctx, nil, metadata,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ ac.LeaseAccessConditions.pointers(), nil)
}
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
@@ -148,7 +236,9 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// RenewLease renews the blob's previously-acquired lease.
@@ -156,7 +246,9 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.RenewLease(ctx, leaseID, nil,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// ReleaseLease releases the blob's previously-acquired lease.
@@ -164,7 +256,9 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAcce
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
@@ -173,7 +267,9 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAc
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// ChangeLease changes the blob's lease ID.
@@ -181,7 +277,9 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
- nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
@@ -196,17 +294,22 @@ func leasePeriodPointer(period int32) (p *int32) {
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
-func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
+func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
-
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
- srcIfModifiedSince, srcIfUnmodifiedSince,
+ tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
+ nil, // source ifTags
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
- dstLeaseID, nil)
+ nil, // Blob ifTags
+ dstLeaseID,
+ nil,
+ blobTagsString, // Blob tags
+ nil)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
index 25a9b324fc..c47ed81b95 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
@@ -5,9 +5,6 @@ import (
"io"
"net/url"
- "encoding/base64"
- "encoding/binary"
-
"github.com/Azure/azure-pipeline-go/pipeline"
)
@@ -16,7 +13,7 @@ const (
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
- BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
+ BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000
@@ -48,6 +45,18 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}
+// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version returning a URL to the base blob.
+func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
+ p := NewBlobURLParts(bb.URL())
+ p.VersionID = versionId
+ return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
+func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return bb.blobClient.GetAccountInfo(ctx)
+}
+
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
@@ -55,36 +64,48 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
+func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
count, err := validateSeekableStreamAt0AndGetCount(body)
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
if err != nil {
return nil, err
}
- return bb.bbClient.Upload(ctx, body, count, nil,
+ return bb.bbClient.Upload(ctx, body, count, nil, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
- &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
- &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
- nil)
+ &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil,
+ blobTagsString, // Blob tags
+ )
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
-func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
+func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return nil, err
}
- return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
+ return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ nil)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
-func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
+func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockFromURLResponse, error) {
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
- return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
+ return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
@@ -93,70 +114,46 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list and permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
-func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
- metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
+func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
- &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+ &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ tier,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil,
+ blobTagsString, // Blob tags
+ )
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
- return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-type BlockID [64]byte
-
-func (blockID BlockID) ToBase64() string {
- return base64.StdEncoding.EncodeToString(blockID[:])
-}
-
-func (blockID *BlockID) FromBase64(s string) error {
- *blockID = BlockID{} // Zero out the block ID
- _, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
- return err
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-type uuidBlockID BlockID
-
-func (ubi uuidBlockID) UUID() uuid {
- u := uuid{}
- copy(u[:], ubi[:len(u)])
- return u
-}
-
-func (ubi uuidBlockID) Number() uint32 {
- return binary.BigEndian.Uint32(ubi[len(uuid{}):])
-}
-
-func newUuidBlockID(u uuid) uuidBlockID {
- ubi := uuidBlockID{} // Create a new uuidBlockID
- copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
- // Block number defaults to 0
- return ubi
-}
-
-func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
- copy(ubi[:len(u)], u[:])
- return ubi
-}
-
-func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
- binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
- return ubi // Return the passed-in copy
-}
-
-func (ubi uuidBlockID) ToBase64() string {
- return BlockID(ubi).ToBase64()
+ return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(),
+ nil, // Blob ifTags
+ nil)
}
-func (ubi *uuidBlockID) FromBase64(s string) error {
- return (*BlockID)(ubi).FromBase64(s)
+// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
+func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobCopyFromURLResponse, error) {
+
+ srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
+ dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
+ dstLeaseID := dstac.LeaseAccessConditions.pointers()
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+ return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier,
+ srcIfModifiedSince, srcIfUnmodifiedSince,
+ srcIfMatchETag, srcIfNoneMatchETag,
+ dstIfModifiedSince, dstIfUnmodifiedSince,
+ dstIfMatchETag, dstIfNoneMatchETag,
+ nil, // Blob ifTags
+ dstLeaseID, nil, srcContentMD5,
+ blobTagsString, // Blob tags
+ nil, // seal Blob
+ )
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
index 48adf08042..39fb5a1f66 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
@@ -32,6 +32,10 @@ func (c ContainerURL) String() string {
return u.String()
}
+func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
+ return c.client.GetAccountInfo(ctx)
+}
+
// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
return NewContainerURL(c.URL(), p)
@@ -80,7 +84,9 @@ func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
- return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
+ return c.client.Create(ctx, nil, metadata, publicAccessType, nil,
+ nil, nil, // container encryption
+ )
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
@@ -269,7 +275,7 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
- Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
+ Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions bool
}
// string produces the Include query parameter's value.
@@ -291,5 +297,11 @@ func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
if d.UncommittedBlobs {
items = append(items, ListBlobsIncludeItemUncommittedblobs)
}
+ if d.Tags {
+ items = append(items, ListBlobsIncludeItemTags)
+ }
+ if d.Versions {
+ items = append(items, ListBlobsIncludeItemVersions)
+ }
return items
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
index 8ee34c05a6..d02eff48d4 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
@@ -14,7 +14,7 @@ const (
// PageBlobPageBytes indicates the number of bytes in a page (512).
PageBlobPageBytes = 512
- // PageBlobMaxPutPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
+ // PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)
@@ -44,32 +44,55 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
-// Create creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
+// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version returning a URL to the base blob.
+func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL {
+ p := NewBlobURLParts(pb.URL())
+ p.VersionID = versionId
+ return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
+}
+
+func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return pb.blobClient.GetAccountInfo(ctx)
+}
+
+// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
+func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*PageBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- return pb.pbClient.Create(ctx, 0, size, nil,
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+ return pb.pbClient.Create(ctx, 0, size, nil, tier,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
- metadata, ac.LeaseAccessConditions.pointers(),
- &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
+ metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ &sequenceNumber, nil,
+ blobTagsString, // Blob tags
+ )
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
+func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return nil, err
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
- return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
+ return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
@@ -77,24 +100,31 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
-func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) {
+func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesFromURLResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
- *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
+ *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ destinationAccessConditions.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
+func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
return pb.pbClient.ClearPages(ctx, 0, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -106,7 +136,23 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
return pb.pbClient.GetPageRanges(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
+}
+
+// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
+func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+
+ return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,
+ prevSnapshotURL, // Get managed disk diff
+ httpRange{offset: offset, count: count}.pointers(),
+ ac.LeaseAccessConditions.pointers(),
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
}
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
@@ -114,21 +160,25 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
+ nil, // Get managed disk diff
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
nil)
}
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
+func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
-// SetSequenceNumber sets the page blob's sequence number.
+// UpdateSequenceNumber sets the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
sn := &sequenceNumber
@@ -141,7 +191,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
sn, nil)
}
-// StartIncrementalCopy begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
+// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
index 5974bc3a42..2d756782eb 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
@@ -4,6 +4,7 @@ import (
"context"
"net/url"
"strings"
+ "time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
@@ -38,6 +39,19 @@ func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInf
return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
}
+//TODO this was supposed to be generated
+//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion
+func NewKeyInfo(Start, Expiry time.Time) KeyInfo {
+ return KeyInfo{
+ Start: Start.UTC().Format(SASTimeFormat),
+ Expiry: Expiry.UTC().Format(SASTimeFormat),
+ }
+}
+
+func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
+ return s.client.GetAccountInfo(ctx)
+}
+
// URL returns the URL endpoint used by the ServiceURL object.
func (s ServiceURL) URL() url.URL {
return s.client.URL()
@@ -102,14 +116,14 @@ type ListContainersSegmentOptions struct {
// TODO: update swagger to generate this type?
}
-func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
+func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) {
if o.Prefix != "" {
prefix = &o.Prefix
}
if o.MaxResults != 0 {
maxResults = &o.MaxResults
}
- include = ListContainersIncludeType(o.Detail.string())
+ include = []ListContainersIncludeType{ListContainersIncludeType(o.Detail.string())}
return
}
@@ -117,15 +131,21 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListC
type ListContainersDetail struct {
// Tells the service whether to return metadata for each container.
Metadata bool
+
+ // Show containers that have been deleted when the soft-delete feature is enabled.
+ // Deleted bool
}
// string produces the Include query parameter's value.
func (d *ListContainersDetail) string() string {
- items := make([]string, 0, 1)
+ items := make([]string, 0, 2)
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
if d.Metadata {
items = append(items, string(ListContainersIncludeMetadata))
}
+ // if d.Deleted {
+ // items = append(items, string(ListContainersIncludeDeleted))
+ // }
if len(items) > 0 {
return strings.Join(items, ",")
}
@@ -143,3 +163,12 @@ func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServi
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
return bsu.client.GetStatistics(ctx, nil, nil)
}
+
+// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
+// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
+// To specify a container, eg. "@container=’containerName’ and Name = ‘C’"
+func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) {
+ return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
index bcc7b956c5..287e1e4b84 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
@@ -1,3 +1,3 @@
package azblob
-const serviceLibVersion = "0.7"
+const serviceLibVersion = "0.13"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go
deleted file mode 100644
index 3e8c7cba3d..0000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build linux darwin freebsd openbsd netbsd dragonfly
-
-package azblob
-
-import (
- "os"
- "syscall"
-)
-
-type mmf []byte
-
-func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
- prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
- if writable {
- prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
- }
- addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
- return mmf(addr), err
-}
-
-func (m *mmf) unmap() {
- err := syscall.Munmap(*m)
- *m = nil
- if err != nil {
- panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
- }
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go
deleted file mode 100644
index 2743644e16..0000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package azblob
-
-import (
- "os"
- "reflect"
- "syscall"
- "unsafe"
-)
-
-type mmf []byte
-
-func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
- prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
- if writable {
- prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
- }
- hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
- if hMMF == 0 {
- return nil, os.NewSyscallError("CreateFileMapping", errno)
- }
- defer syscall.CloseHandle(hMMF)
- addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
- m := mmf{}
- h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
- h.Data = addr
- h.Len = length
- h.Cap = h.Len
- return m, nil
-}
-
-func (m *mmf) unmap() {
- addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
- *m = mmf{}
- err := syscall.UnmapViewOfFile(addr)
- if err != nil {
- panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
- }
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
index 7c249a298d..ba99255c14 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
@@ -41,6 +41,5 @@ func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
NewRequestLogPolicyFactory(o.RequestLog),
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
-
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
index 0a362ea8b6..29a99a844f 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
@@ -62,15 +62,21 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
logLevel, forceLog = pipeline.LogWarning, true
}
- if err == nil { // We got a response from the service
- sc := response.Response().StatusCode
- if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
- logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed is an error) or any 5xx
- } else {
- // For other status codes, we leave the level as is.
+ var sc int
+ if err == nil { // We got a valid response from the service
+ sc = response.Response().StatusCode
+ } else { // We got an error, so we should inspect if we got a response
+ if se, ok := err.(StorageError); ok {
+ if r := se.Response(); r != nil {
+ sc = r.StatusCode
+ }
}
- } else { // This error did not get an HTTP response from the service; upgrade the severity to Error
- logLevel, forceLog = pipeline.LogError, true
+ }
+
+ if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
+	logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those excluded above) or any 5xx
+ } else {
+ // For other status codes, we leave the level as is.
}
if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
index 00531fee0b..0894fcc324 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
@@ -240,6 +240,8 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
} else {
action = "NoRetry: net.Error and in the non-retriable list"
}
+ } else if err == io.ErrUnexpectedEOF {
+ action = "Retry: unexpected EOF"
} else {
action = "NoRetry: unrecognized error"
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
index a75c7d1d2e..db8cee7b40 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
@@ -2,7 +2,7 @@ package azblob
import (
"context"
-
+ "errors"
"github.com/Azure/azure-pipeline-go/pipeline"
)
@@ -14,9 +14,22 @@ func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
id := request.Header.Get(xMsClientRequestID)
if id == "" { // Add a unique request ID if the caller didn't specify one already
- request.Header.Set(xMsClientRequestID, newUUID().String())
+ id = newUUID().String()
+ request.Header.Set(xMsClientRequestID, id)
}
- return next.Do(ctx, request)
+
+ resp, err := next.Do(ctx, request)
+
+ if err == nil && resp != nil {
+ val := resp.Response().Header.Values(xMsClientRequestID)
+ if len(val) > 0 {
+ if val[0] != id {
+ err = errors.New("client Request ID from request and response does not match")
+ }
+ }
+ }
+
+ return resp, err
}
})
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
index 3247aca662..ad38f597ed 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
@@ -41,6 +41,7 @@ type RetryReaderOptions struct {
MaxRetryRequests int
doInjectError bool
doInjectErrorRound int
+ injectedError error
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
NotifyFailedRead FailedReadNotifier
@@ -55,6 +56,8 @@ type RetryReaderOptions struct {
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
// which will be retried.
TreatEarlyCloseAsError bool
+
+ ClientProvidedKeyOptions ClientProvidedKeyOptions
}
// retryReader implements io.ReaderCloser methods.
@@ -117,7 +120,11 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound {
- err = &net.DNSError{IsTemporary: true}
+ if s.o.injectedError != nil {
+ err = s.o.injectedError
+ } else {
+ err = &net.DNSError{IsTemporary: true}
+ }
}
// We successfully read data or end EOF.
@@ -134,7 +141,8 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// Check the retry count and error code, and decide whether to retry.
retriesExhausted := try >= s.o.MaxRetryRequests
_, isNetError := err.(net.Error)
- willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+ isUnexpectedEOF := err == io.ErrUnexpectedEOF
+ willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
// Notify, for logging purposes, of any failures
if s.o.NotifyFailedRead != nil {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
index c000c48ec8..3010a6adca 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
@@ -76,7 +76,7 @@ func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Sh
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
- Read, Write, Delete, List, Add, Create, Update, Process bool
+ Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
}
// String produces the SAS permissions string for an Azure Storage account.
@@ -92,6 +92,9 @@ func (p AccountSASPermissions) String() string {
if p.Delete {
buffer.WriteRune('d')
}
+ if p.DeletePreviousVersion {
+ buffer.WriteRune('x')
+ }
if p.List {
buffer.WriteRune('l')
}
@@ -107,6 +110,12 @@ func (p AccountSASPermissions) String() string {
if p.Process {
buffer.WriteRune('p')
}
+ if p.Tag {
+ buffer.WriteRune('t')
+ }
+ if p.FilterByTags {
+ buffer.WriteRune('f')
+ }
return buffer.String()
}
@@ -131,8 +140,14 @@ func (p *AccountSASPermissions) Parse(s string) error {
p.Update = true
case 'p':
p.Process = true
+	case 'x':
+		p.DeletePreviousVersion = true
+ case 't':
+ p.Tag = true
+ case 'f':
+ p.FilterByTags = true
default:
- return fmt.Errorf("Invalid permission character: '%v'", r)
+ return fmt.Errorf("invalid permission character: '%v'", r)
}
}
return nil
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
index 11b1b2ba0b..f87ef2b7fc 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
@@ -1,6 +1,7 @@
package azblob
import (
+ "errors"
"net"
"net/url"
"strings"
@@ -25,11 +26,11 @@ const (
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
ss := ""
if !startTime.IsZero() {
- ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
+ ss = formatSASTimeWithDefaultFormat(&startTime)
}
se := ""
if !expiryTime.IsZero() {
- se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
+ se = formatSASTimeWithDefaultFormat(&expiryTime)
}
sh := ""
if !snapshotTime.IsZero() {
@@ -39,7 +40,38 @@ func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (st
}
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
-const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+ return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatSASTime(t *time.Time, format string) string {
+ if format != "" {
+ return t.Format(format)
+ }
+ return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString try to parse sas time string.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+ for _, sasTimeFormat := range SASTimeFormats {
+ t, err = time.Parse(sasTimeFormat, val)
+ if err == nil {
+ timeFormat = sasTimeFormat
+ break
+ }
+ }
+
+ if err != nil {
+		err = errors.New("fail to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+ }
+
+ return
+}
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
@@ -74,6 +106,10 @@ type SASQueryParameters struct {
signedExpiry time.Time `param:"ske"`
signedService string `param:"sks"`
signedVersion string `param:"skv"`
+
+ // private member used for startTime and expiryTime formatting.
+ stTimeFormat string
+ seTimeFormat string
}
func (p *SASQueryParameters) SignedOid() string {
@@ -202,9 +238,9 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
case "snapshot":
p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
case "st":
- p.startTime, _ = time.Parse(SASTimeFormat, val)
+ p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
case "se":
- p.expiryTime, _ = time.Parse(SASTimeFormat, val)
+ p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
case "sip":
dashIndex := strings.Index(val, "-")
if dashIndex == -1 {
@@ -268,10 +304,10 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
v.Add("spr", string(p.protocol))
}
if !p.startTime.IsZero() {
- v.Add("st", p.startTime.Format(SASTimeFormat))
+ v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat))
}
if !p.expiryTime.IsZero() {
- v.Add("se", p.expiryTime.Format(SASTimeFormat))
+ v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat))
}
if len(p.ipRange.Start) > 0 {
v.Add("sip", p.ipRange.String())
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go
index 765beb2415..d09ddcffcc 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go
@@ -114,6 +114,9 @@ const (
// ServiceCodeResourceNotFound means the specified resource does not exist (404).
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
+ // ServiceCodeNoAuthenticationInformation means the specified authentication for the resource does not exist (401).
+ ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation"
+
// ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503).
ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
index e7872a8a3f..a3cbd9817b 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
@@ -79,7 +79,7 @@ func (e *storageError) Error() string {
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
func (e *storageError) Temporary() bool {
if e.response != nil {
- if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
+ if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
return true
}
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
index 719bcb624e..cb92f7e507 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
@@ -34,20 +34,30 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
// information, see Setting
// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
-// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
-// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
-// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
-// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
-// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
-// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
-// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
-// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
-// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
-// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
-// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
+// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be
+// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and
+// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If
+// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the
+// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
+// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
+// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in
+// the request. If not specified, encryption is performed with the root account encryption key. For more information,
+// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided
+// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm
+// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the
+// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the
+// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
+// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
+// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
+// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
+// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
+// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a
+// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a
+// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled.
+func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -56,7 +66,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -68,7 +78,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
}
// appendBlockPreparer prepares the AppendBlock request.
-func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -83,6 +93,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
+ if transactionalContentCrc64 != nil {
+ req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -92,6 +105,18 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
if appendPosition != nil {
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -104,6 +129,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -128,33 +156,44 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
//
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
-// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
-// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
-// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
-// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
-// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
-// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
-// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
-// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
-// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
-// only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to
-// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
-// matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified
-// since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it
-// has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs
-// with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
+// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
+// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data
+// provided in the request. If not specified, encryption is performed with the root account encryption key. For more
+// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
+// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
+// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
+// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this
+// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append
+// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value
+// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
+// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
+// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified
+// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
+// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a
+// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is
+// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is
+// specify this header value to operate only on a blob if it has been modified since the specified date/time.
+// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
+// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
+// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
+// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled.
+func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
+ req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -166,7 +205,7 @@ func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL
}
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
-func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -184,7 +223,25 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont
if sourceContentMD5 != nil {
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
}
+ if sourceContentcrc64 != nil {
+ req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
+ }
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ if transactionalContentMD5 != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
+ }
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -206,6 +263,9 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -255,20 +315,29 @@ func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Respons
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
-// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
-// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
-// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
-// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-// analytics logging is enabled.
-func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
+// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
+// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
+// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
+// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
+// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
+// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
+// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince
+// is specify this header value to operate only on a blob if it has been modified since the specified date/time.
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
+// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
+// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on
+// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value
+// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+// blobTagsString is optional. Used to set blob tags in various blob operations.
+func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*AppendBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@@ -280,7 +349,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
}
// createPreparer prepares the Create request.
-func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -317,6 +386,18 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -329,10 +410,16 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
req.Header.Set("x-ms-blob-type", "AppendBlob")
return req, nil
}
@@ -347,3 +434,84 @@ func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline
resp.Response().Body.Close()
return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
}
+
+// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12
+// version or later.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
+// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is
+// specify this header value to operate only on a blob if it has been modified since the specified date/time.
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
+// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
+// specify an ETag value to operate only on blobs without a matching value. appendPosition is optional conditional
+// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will
+// succeed only if the append position is equal to this number. If it is not, the request will fail with the
+// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*AppendBlobSealResponse), err
+}
+
+// sealPreparer prepares the Seal request.
+func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "seal")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ if leaseID != nil {
+ req.Header.Set("x-ms-lease-id", *leaseID)
+ }
+ if ifModifiedSince != nil {
+ req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifUnmodifiedSince != nil {
+ req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifMatch != nil {
+ req.Header.Set("If-Match", string(*ifMatch))
+ }
+ if ifNoneMatch != nil {
+ req.Header.Set("If-None-Match", string(*ifNoneMatch))
+ }
+ if appendPosition != nil {
+ req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
+ }
+ return req, nil
+}
+
+// sealResponder handles the response to the Seal request.
+func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &AppendBlobSealResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go
index 5e30263a0b..036bbfcfad 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go
@@ -4,8 +4,10 @@ package azblob
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
+ "bytes"
"context"
"encoding/base64"
+ "encoding/xml"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
@@ -100,16 +102,17 @@ func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipe
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
-// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
-// recorded in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) {
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
+// analytics logs when storage analytics logging is enabled.
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobAcquireLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -121,7 +124,7 @@ func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, durat
}
// acquireLeasePreparer prepares the AcquireLease request.
-func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -150,6 +153,9 @@ func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, p
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -183,16 +189,17 @@ func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) {
+// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is
+// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
+// storage analytics logging is enabled.
+func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobBreakLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -204,7 +211,7 @@ func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPe
}
// breakLeasePreparer prepares the BreakLease request.
-func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -230,6 +237,9 @@ func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32,
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -261,16 +271,17 @@ func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.R
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
-// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
-// in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) {
+// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled.
+func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobChangeLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -282,7 +293,7 @@ func (client blobClient) ChangeLease(ctx context.Context, leaseID string, propos
}
// changeLeasePreparer prepares the ChangeLease request.
-func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -307,6 +318,9 @@ func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID str
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -339,25 +353,29 @@ func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
-// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
-// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
-// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
-// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
-// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
-// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
-// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
-// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCopyFromURLResponse, error) {
+// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob.
+// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the
+// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not
+// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a
+// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
+// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
+// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
+// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL
+// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation
+// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated,
+// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
+// enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be read from the copy
+// source. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the
+// sealed state of the destination blob. Service version 2019-12-12 and newer.
+func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, sealBlob *bool) (*BlobCopyFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.copyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
+ req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, sourceContentMD5, blobTagsString, sealBlob)
if err != nil {
return nil, err
}
@@ -369,7 +387,7 @@ func (client blobClient) CopyFromURL(ctx context.Context, copySource string, tim
}
// copyFromURLPreparer prepares the CopyFromURL request.
-func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, sealBlob *bool) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -384,6 +402,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32,
req.Header.Set("x-ms-meta-"+k, v)
}
}
+ if tier != AccessTierNone {
+ req.Header.Set("x-ms-access-tier", string(tier))
+ }
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -408,6 +429,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32,
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-copy-source", copySource)
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
@@ -416,6 +440,15 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32,
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if sourceContentMD5 != nil {
+ req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
+ }
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
+ if sealBlob != nil {
+ req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob))
+ }
req.Header.Set("x-ms-requires-sync", "true")
return req, nil
}
@@ -440,21 +473,30 @@ func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
-// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
-// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use
+// to encrypt the data provided in the request. If not specified, encryption is performed with the root account
+// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
+// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
+// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version
+// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the
+// request. If not specified, encryption is performed with the default account encryption scope. For more information,
+// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
-// without a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and
-// matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
-// in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled.
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
+ req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID)
if err != nil {
return nil, err
}
@@ -466,7 +508,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met
}
// createSnapshotPreparer prepares the CreateSnapshot request.
-func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -482,6 +524,18 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str
req.Header.Set("x-ms-meta-"+k, v)
}
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -494,6 +548,9 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -529,7 +586,9 @@ func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeli
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see Creating
-// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one
@@ -538,16 +597,17 @@ func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeli
// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) {
+// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is
+// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
+// storage analytics logging is enabled.
+func (client blobClient) Delete(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobDeleteResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.deletePreparer(snapshot, versionID, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -559,7 +619,7 @@ func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *
}
// deletePreparer prepares the Delete request.
-func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) deletePreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("DELETE", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -568,6 +628,9 @@ func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseI
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
@@ -590,6 +653,9 @@ func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseI
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -614,25 +680,35 @@ func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Respo
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see Creating
-// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for
-// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value
-// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
-// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
-// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
-// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
-// limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) {
+// the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and
+// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less
+// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data
+// provided in the request. If not specified, encryption is performed with the root account encryption key. For more
+// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
+// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
+// analytics logs when storage analytics logging is enabled.
+func (client blobClient) Download(ctx context.Context, snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*downloadResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.downloadPreparer(snapshot, versionID, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -644,7 +720,7 @@ func (client blobClient) Download(ctx context.Context, snapshot *string, timeout
}
// downloadPreparer prepares the Download request.
-func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) downloadPreparer(snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -653,6 +729,9 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
@@ -666,6 +745,18 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang
if rangeGetContentMD5 != nil {
req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5))
}
+ if rangeGetContentCRC64 != nil {
+ req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64))
+ }
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -678,6 +769,9 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -694,6 +788,86 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
return &downloadResponse{rawResponse: resp.Response()}, err
}
+// GetAccessControl get the owner, group, permissions, or access control list for a blob.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. upn is optional. Valid only when Hierarchical Namespace is enabled for the
+// account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will
+// be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be
+// returned as Azure Active Directory Object IDs. The default value is false. leaseID is if specified, the operation
+// only succeeds if the resource's lease is active and matches this ID. ifMatch is specify an ETag value to operate
+// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
+// matching value. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since
+// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
+// modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character
+// limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client blobClient) GetAccessControl(ctx context.Context, timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobGetAccessControlResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getAccessControlPreparer(timeout, upn, leaseID, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessControlResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*BlobGetAccessControlResponse), err
+}
+
+// getAccessControlPreparer prepares the GetAccessControl request.
+func (client blobClient) getAccessControlPreparer(timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("HEAD", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if upn != nil {
+ params.Set("upn", strconv.FormatBool(*upn))
+ }
+ params.Set("action", "getAccessControl")
+ req.URL.RawQuery = params.Encode()
+ if leaseID != nil {
+ req.Header.Set("x-ms-lease-id", *leaseID)
+ }
+ if ifMatch != nil {
+ req.Header.Set("If-Match", string(*ifMatch))
+ }
+ if ifNoneMatch != nil {
+ req.Header.Set("If-None-Match", string(*ifNoneMatch))
+ }
+ if ifModifiedSince != nil {
+ req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifUnmodifiedSince != nil {
+ req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// getAccessControlResponder handles the response to the GetAccessControl request.
+func (client blobClient) getAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &BlobGetAccessControlResponse{rawResponse: resp.Response()}, err
+}
+
// GetAccountInfo returns the sku name and account kind
func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
req, err := client.getAccountInfoPreparer()
@@ -738,23 +912,31 @@ func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeli
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see Creating
-// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
-// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
-// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
-// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) {
+// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
+// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
+// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
+// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
+// analytics logs when storage analytics logging is enabled.
+func (client blobClient) GetProperties(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobGetPropertiesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.getPropertiesPreparer(snapshot, versionID, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -766,7 +948,7 @@ func (client blobClient) GetProperties(ctx context.Context, snapshot *string, ti
}
// getPropertiesPreparer prepares the GetProperties request.
-func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) getPropertiesPreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("HEAD", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -775,6 +957,9 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32,
if snapshot != nil && len(*snapshot) > 0 {
params.Set("snapshot", *snapshot)
}
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
@@ -782,6 +967,15 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32,
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -794,6 +988,9 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32,
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -812,6 +1009,191 @@ func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipelin
return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err
}
+// GetTags the Get Tags operation enables users to get the tags associated with a blob.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. snapshot is the
+// snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+// information on working with blob snapshots, see Creating
+// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. ifTags is specify a
+// SQL where clause on blob tags to operate only on blobs with a matching value.
+func (client blobClient) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (*BlobTags, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.getTagsPreparer(timeout, requestID, snapshot, versionID, ifTags)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getTagsResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*BlobTags), err
+}
+
+// getTagsPreparer prepares the GetTags request.
+func (client blobClient) getTagsPreparer(timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if snapshot != nil && len(*snapshot) > 0 {
+ params.Set("snapshot", *snapshot)
+ }
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
+ params.Set("comp", "tags")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
+ return req, nil
+}
+
+// getTagsResponder handles the response to the GetTags request.
+func (client blobClient) getTagsResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &BlobTags{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// TODO funky quick query code
+//// Query the Query operation enables users to select/project on blob data by providing simple query expressions.
+////
+//// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
+//// retrieve. For more information on working with blob snapshots, see Creating
+//// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+//// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+//// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
+//// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
+//// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+//// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+//// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+//// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
+//// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+//// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
+//// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
+//// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
+//// recorded in the analytics logs when storage analytics logging is enabled.
+//func (client blobClient) Query(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*QueryResponse, error) {
+// if err := validate([]validation{
+// {targetValue: timeout,
+// constraints: []constraint{{target: "timeout", name: null, rule: false,
+// chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+// return nil, err
+// }
+// req, err := client.queryPreparer(snapshot, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+// if err != nil {
+// return nil, err
+// }
+// resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.queryResponder}, req)
+// if err != nil {
+// return nil, err
+// }
+// return resp.(*QueryResponse), err
+//}
+//
+//// queryPreparer prepares the Query request.
+//func (client blobClient) queryPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+// req, err := pipeline.NewRequest("POST", client.url, nil)
+// if err != nil {
+// return req, pipeline.NewError(err, "failed to create request")
+// }
+// params := req.URL.Query()
+// if snapshot != nil && len(*snapshot) > 0 {
+// params.Set("snapshot", *snapshot)
+// }
+// if timeout != nil {
+// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+// }
+// params.Set("comp", "query")
+// req.URL.RawQuery = params.Encode()
+// if leaseID != nil {
+// req.Header.Set("x-ms-lease-id", *leaseID)
+// }
+// if encryptionKey != nil {
+// req.Header.Set("x-ms-encryption-key", *encryptionKey)
+// }
+// if encryptionKeySha256 != nil {
+// req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+// }
+// if encryptionAlgorithm != EncryptionAlgorithmNone {
+// req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+// }
+// if ifModifiedSince != nil {
+// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if ifUnmodifiedSince != nil {
+// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if ifMatch != nil {
+// req.Header.Set("If-Match", string(*ifMatch))
+// }
+// if ifNoneMatch != nil {
+// req.Header.Set("If-None-Match", string(*ifNoneMatch))
+// }
+// req.Header.Set("x-ms-version", ServiceVersion)
+// if requestID != nil {
+// req.Header.Set("x-ms-client-request-id", *requestID)
+// }
+// b, err := xml.Marshal(queryRequest)
+// if err != nil {
+// return req, pipeline.NewError(err, "failed to marshal request body")
+// }
+// req.Header.Set("Content-Type", "application/xml")
+// err = req.SetBody(bytes.NewReader(b))
+// if err != nil {
+// return req, pipeline.NewError(err, "failed to set request body")
+// }
+// return req, nil
+//}
+//
+//// queryResponder handles the response to the Query request.
+//func (client blobClient) queryResponder(resp pipeline.Response) (pipeline.Response, error) {
+// err := validateResponse(resp, http.StatusOK, http.StatusPartialContent)
+// if resp == nil {
+// return nil, err
+// }
+// return &QueryResponse{rawResponse: resp.Response()}, err
+//}
+
// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
// operations
//
@@ -822,16 +1204,17 @@ func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipelin
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
-// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
-// in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) {
+// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled.
+func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobReleaseLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -843,7 +1226,7 @@ func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeo
}
// releaseLeasePreparer prepares the ReleaseLease request.
-func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -867,6 +1250,9 @@ func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, if
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -886,6 +1272,147 @@ func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline
return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err
}
+// TODO funky rename API
+//// Rename rename a blob/file. By default, the destination is overwritten and if the destination already exists and has
+//// a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see
+//// [Specifying Conditional Headers for Blob Service
+//// Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+//// To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+////
+//// renameSource is the file or directory to be renamed. The value must have the following format:
+//// "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will overwrite the existing properties;
+//// otherwise, the existing properties will be preserved. timeout is the timeout parameter is expressed in seconds. For
+//// more information, see Setting
+//// Timeouts for Blob Service Operations. directoryProperties is optional. User-defined properties to be stored
+//// with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+//// where each value is base64 encoded. posixPermissions is optional and only valid if Hierarchical Namespace is enabled
+//// for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may
+//// be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and
+//// 4-digit octal notation (e.g. 0766) are supported. posixUmask is only valid if Hierarchical Namespace is enabled for
+//// the account. This umask restricts permission settings for file and directory, and will only be applied when default
+//// Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be
+//// disabled. Otherwise the corresponding permission will be determined by the permission. A 4-digit octal notation
+//// (e.g. 0022) is supported here. If no umask was specified, a default umask - 0027 will be used. cacheControl is cache
+//// control for given resource contentType is content type for given resource contentEncoding is content encoding for
+//// given resource contentLanguage is content language for given resource contentDisposition is content disposition for
+//// given resource leaseID is if specified, the operation only succeeds if the resource's lease is active and matches
+//// this ID. sourceLeaseID is a lease ID for the source path. If specified, the source path must have an active lease
+//// and the lease ID must match. ifModifiedSince is specify this header value to operate only on a blob if it has been
+//// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
+//// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
+//// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+//// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the
+//// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not
+//// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a
+//// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+//// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+//// logs when storage analytics logging is enabled.
+//func (client blobClient) Rename(ctx context.Context, renameSource string, timeout *int32, directoryProperties *string, posixPermissions *string, posixUmask *string, cacheControl *string, contentType *string, contentEncoding *string, contentLanguage *string, contentDisposition *string, leaseID *string, sourceLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlobRenameResponse, error) {
+// if err := validate([]validation{
+// {targetValue: timeout,
+// constraints: []constraint{{target: "timeout", name: null, rule: false,
+// chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+// return nil, err
+// }
+// req, err := client.renamePreparer(renameSource, timeout, directoryProperties, posixPermissions, posixUmask, cacheControl, contentType, contentEncoding, contentLanguage, contentDisposition, leaseID, sourceLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
+// if err != nil {
+// return nil, err
+// }
+// resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renameResponder}, req)
+// if err != nil {
+// return nil, err
+// }
+// return resp.(*BlobRenameResponse), err
+//}
+//
+//// renamePreparer prepares the Rename request.
+//func (client blobClient) renamePreparer(renameSource string, timeout *int32, directoryProperties *string, posixPermissions *string, posixUmask *string, cacheControl *string, contentType *string, contentEncoding *string, contentLanguage *string, contentDisposition *string, leaseID *string, sourceLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+// req, err := pipeline.NewRequest("PUT", client.url, nil)
+// if err != nil {
+// return req, pipeline.NewError(err, "failed to create request")
+// }
+// params := req.URL.Query()
+// if timeout != nil {
+// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+// }
+// if pathRenameMode != PathRenameModeNone {
+// params.Set("mode", string(client.PathRenameMode))
+// }
+// req.URL.RawQuery = params.Encode()
+// req.Header.Set("x-ms-rename-source", renameSource)
+// if directoryProperties != nil {
+// req.Header.Set("x-ms-properties", *directoryProperties)
+// }
+// if posixPermissions != nil {
+// req.Header.Set("x-ms-permissions", *posixPermissions)
+// }
+// if posixUmask != nil {
+// req.Header.Set("x-ms-umask", *posixUmask)
+// }
+// if cacheControl != nil {
+// req.Header.Set("x-ms-cache-control", *cacheControl)
+// }
+// if contentType != nil {
+// req.Header.Set("x-ms-content-type", *contentType)
+// }
+// if contentEncoding != nil {
+// req.Header.Set("x-ms-content-encoding", *contentEncoding)
+// }
+// if contentLanguage != nil {
+// req.Header.Set("x-ms-content-language", *contentLanguage)
+// }
+// if contentDisposition != nil {
+// req.Header.Set("x-ms-content-disposition", *contentDisposition)
+// }
+// if leaseID != nil {
+// req.Header.Set("x-ms-lease-id", *leaseID)
+// }
+// if sourceLeaseID != nil {
+// req.Header.Set("x-ms-source-lease-id", *sourceLeaseID)
+// }
+// if ifModifiedSince != nil {
+// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if ifUnmodifiedSince != nil {
+// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if ifMatch != nil {
+// req.Header.Set("If-Match", string(*ifMatch))
+// }
+// if ifNoneMatch != nil {
+// req.Header.Set("If-None-Match", string(*ifNoneMatch))
+// }
+// if sourceIfModifiedSince != nil {
+// req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if sourceIfUnmodifiedSince != nil {
+// req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
+// }
+// if sourceIfMatch != nil {
+// req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
+// }
+// if sourceIfNoneMatch != nil {
+// req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
+// }
+// req.Header.Set("x-ms-version", ServiceVersion)
+// if requestID != nil {
+// req.Header.Set("x-ms-client-request-id", *requestID)
+// }
+// return req, nil
+//}
+//
+//// renameResponder handles the response to the Rename request.
+//func (client blobClient) renameResponder(resp pipeline.Response) (pipeline.Response, error) {
+// err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+// if resp == nil {
+// return nil, err
+// }
+// io.Copy(ioutil.Discard, resp.Response().Body)
+// resp.Response().Body.Close()
+// return &BlobRenameResponse{rawResponse: resp.Response()}, err
+//}
+
// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
// operations
//
@@ -896,16 +1423,17 @@ func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
-// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
-// in the analytics logs when storage analytics logging is enabled.
-func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) {
+// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled.
+func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobRenewLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -917,7 +1445,7 @@ func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout
}
// renewLeasePreparer prepares the RenewLease request.
-func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -941,6 +1469,9 @@ func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifMo
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -960,6 +1491,159 @@ func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.R
return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err
}
+// SetAccessControl set the owner, group, permissions, or access control list for a blob.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. owner is optional. The owner of the blob or directory. group is optional. The
+// owning group of the blob or directory. posixPermissions is optional and only valid if Hierarchical Namespace is
+// enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each
+// class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic
+// (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. posixACL is sets POSIX access control rights on
+// files and directories. The value is a comma-separated list of access control entries. Each access control entry
+// (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format
+// "[scope:][type]:[id]:[permissions]". ifMatch is specify an ETag value to operate only on blobs with a matching
+// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifModifiedSince is
+// specify this header value to operate only on a blob if it has been modified since the specified date/time.
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
+// specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
+// recorded in the analytics logs when storage analytics logging is enabled.
+func (client blobClient) SetAccessControl(ctx context.Context, timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobSetAccessControlResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setAccessControlPreparer(timeout, leaseID, owner, group, posixPermissions, posixACL, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessControlResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*BlobSetAccessControlResponse), err
+}
+
+// setAccessControlPreparer prepares the SetAccessControl request.
+func (client blobClient) setAccessControlPreparer(timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PATCH", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("action", "setAccessControl")
+ req.URL.RawQuery = params.Encode()
+ if leaseID != nil {
+ req.Header.Set("x-ms-lease-id", *leaseID)
+ }
+ if owner != nil {
+ req.Header.Set("x-ms-owner", *owner)
+ }
+ if group != nil {
+ req.Header.Set("x-ms-group", *group)
+ }
+ if posixPermissions != nil {
+ req.Header.Set("x-ms-permissions", *posixPermissions)
+ }
+ if posixACL != nil {
+ req.Header.Set("x-ms-acl", *posixACL)
+ }
+ if ifMatch != nil {
+ req.Header.Set("If-Match", string(*ifMatch))
+ }
+ if ifNoneMatch != nil {
+ req.Header.Set("If-None-Match", string(*ifNoneMatch))
+ }
+ if ifModifiedSince != nil {
+ req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifUnmodifiedSince != nil {
+ req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ return req, nil
+}
+
+// setAccessControlResponder handles the response to the SetAccessControl request.
+func (client blobClient) setAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &BlobSetAccessControlResponse{rawResponse: resp.Response()}, err
+}
+
+// SetExpiry sets the time a blob will expire and be deleted.
+//
+// expiryOptions is required. Indicates mode of the expiry time timeout is the timeout parameter is expressed in
+// seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. expiresOn is the
+// time to set the blob to expiry
+func (client blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (*BlobSetExpiryResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setExpiryPreparer(expiryOptions, timeout, requestID, expiresOn)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setExpiryResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*BlobSetExpiryResponse), err
+}
+
+// setExpiryPreparer prepares the SetExpiry request.
+func (client blobClient) setExpiryPreparer(expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "expiry")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ req.Header.Set("x-ms-expiry-option", string(expiryOptions))
+ if expiresOn != nil {
+ req.Header.Set("x-ms-expiry-time", *expiresOn)
+ }
+ return req, nil
+}
+
+// setExpiryResponder handles the response to the SetExpiry request.
+func (client blobClient) setExpiryResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &BlobSetExpiryResponse{rawResponse: resp.Response()}, err
+}
+
// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob
//
// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. versionID is the version id parameter is an opaque DateTime value that,
+// when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+// transactionalContentMD5 is specify the transactional md5 for the body, to be validated by the service.
+// transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated by the service. requestID
+// is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
+// storage analytics logging is enabled. ifTags is specify a SQL where clause on blob tags to operate only on blobs
+// with a matching value. tags is blob tags
+func (client blobClient) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, tags *BlobTags) (*BlobSetTagsResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.setTagsPreparer(timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, tags)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTagsResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*BlobSetTagsResponse), err
+}
+
+// setTagsPreparer prepares the SetTags request.
+func (client blobClient) setTagsPreparer(timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, tags *BlobTags) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
+ params.Set("comp", "tags")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if transactionalContentMD5 != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
+ }
+ if transactionalContentCrc64 != nil {
+ req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
+ }
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
+ b, err := xml.Marshal(tags)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to marshal request body")
+ }
+ req.Header.Set("Content-Type", "application/xml")
+ err = req.SetBody(bytes.NewReader(b))
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to set request body")
+ }
+ return req, nil
+}
+
+// setTagsResponder handles the response to the SetTags request.
+func (client blobClient) setTagsResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusNoContent)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &BlobSetTagsResponse{rawResponse: resp.Response()}, err
+}
+
// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage
// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier
// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
// storage type. This operation does not update the blob's ETag.
//
-// tier is indicates the tier to be set on the blob. timeout is the timeout parameter is expressed in seconds. For more
-// information, see Creating
+// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
-// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
-// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
-// specified, the operation only succeeds if the resource's lease is active and matches this ID.
-func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (*BlobSetTierResponse, error) {
+// Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to
+// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that
+// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation
+// only succeeds if the resource's lease is active and matches this ID.
+func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (*BlobSetTierResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.setTierPreparer(tier, timeout, requestID, leaseID)
+ req, err := client.setTierPreparer(tier, snapshot, versionID, timeout, rehydratePriority, requestID, leaseID)
if err != nil {
return nil, err
}
@@ -1174,18 +1972,27 @@ func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeo
}
// setTierPreparer prepares the SetTier request.
-func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (pipeline.Request, error) {
+func (client blobClient) setTierPreparer(tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
+ if snapshot != nil && len(*snapshot) > 0 {
+ params.Set("snapshot", *snapshot)
+ }
+ if versionID != nil && len(*versionID) > 0 {
+ params.Set("versionid", *versionID)
+ }
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "tier")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-access-tier", string(tier))
+ if rehydratePriority != RehydratePriorityNone {
+ req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority))
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -1219,25 +2026,30 @@ func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Resp
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
-// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
-// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
-// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
-// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
-// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
-// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
-// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
-// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
+// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob.
+// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob.
+// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the
+// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not
+// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a
+// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+// sourceIfTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
+// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
+// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
+// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL
+// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation
+// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated,
+// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
+// enabled. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the
+// sealed state of the destination blob. Service version 2019-12-12 and newer.
+func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool) (*BlobStartCopyFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
+ req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, blobTagsString, sealBlob)
if err != nil {
return nil, err
}
@@ -1249,7 +2061,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string
}
// startCopyFromURLPreparer prepares the StartCopyFromURL request.
-func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -1264,6 +2076,12 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
req.Header.Set("x-ms-meta-"+k, v)
}
}
+ if tier != AccessTierNone {
+ req.Header.Set("x-ms-access-tier", string(tier))
+ }
+ if rehydratePriority != RehydratePriorityNone {
+ req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority))
+ }
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -1276,6 +2094,9 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
if sourceIfNoneMatch != nil {
req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
}
+ if sourceIfTags != nil {
+ req.Header.Set("x-ms-source-if-tags", *sourceIfTags)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -1288,6 +2109,9 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-copy-source", copySource)
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
@@ -1296,6 +2120,12 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
+ if sealBlob != nil {
+ req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob))
+ }
return req, nil
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go
index 955f7d1903..0008273abc 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go
@@ -43,27 +43,39 @@ func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
-// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
-// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
-// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
-// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
-// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
-// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
-// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
-// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
-// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
-// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
-// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-// analytics logging is enabled.
-func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
+// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be
+// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated
+// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no
+// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination
+// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata,
+// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names
+// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for
+// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches
+// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional.
+// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is
+// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage
+// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the
+// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key
+// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to
+// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account
+// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional.
+// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if
+// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
+// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
+// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
+// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
+// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob
+// operations.
+func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobCommitBlockListResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@@ -75,7 +87,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL
}
// commitBlockListPreparer prepares the CommitBlockList request.
-func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -101,6 +113,12 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
+ if transactionalContentMD5 != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
+ }
+ if transactionalContentCrc64 != nil {
+ req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
+ }
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
@@ -112,6 +130,21 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
+ if tier != AccessTierNone {
+ req.Header.Set("x-ms-access-tier", string(tier))
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -124,10 +157,16 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
b, err := xml.Marshal(blocks)
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
@@ -161,16 +200,17 @@ func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (
// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
-// limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
+// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with
+// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
+// recorded in the analytics logs when storage analytics logging is enabled.
+func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
+ req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -182,7 +222,7 @@ func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockLi
}
// getBlockListPreparer prepares the GetBlockList request.
-func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -200,6 +240,9 @@ func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snaps
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -238,13 +281,22 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the
-// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
+// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the
+// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
// seconds. For more information, see Setting
// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
-// limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
+// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
+// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
+// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
+// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
+// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
+// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
+// analytics logs when storage analytics logging is enabled.
+func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -253,7 +305,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
+ req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID)
if err != nil {
return nil, err
}
@@ -265,7 +317,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
}
// stageBlockPreparer prepares the StageBlock request.
-func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -281,9 +333,24 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
+ if transactionalContentCrc64 != nil {
+ req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -309,24 +376,33 @@ func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipel
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source.
// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the
+// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the
// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For
// more information, see Setting
-// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if
-// it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate
-// only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to
-// operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs
-// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
-// recorded in the analytics logs when storage analytics logging is enabled.
-func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
+// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt
+// the data provided in the request. If not specified, encryption is performed with the root account encryption key.
+// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
+// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
+// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
+// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later.
+// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified,
+// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for
+// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and
+// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been
+// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a
+// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate
+// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a
+// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
+// in the analytics logs when storage analytics logging is enabled.
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
+ req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -338,7 +414,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str
}
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
-func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -358,6 +434,21 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
if sourceContentMD5 != nil {
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
}
+ if sourceContentcrc64 != nil {
+ req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
+ }
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -400,27 +491,37 @@ func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response)
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see Setting
-// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified,
-// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
-// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
-// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
-// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
-// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
-// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
-// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
-// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
-// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
-// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
-// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
-// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
-// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
-// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
-// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
-// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
+// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
+// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property
+// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content
+// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage
+// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with
+// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated,
+// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets
+// the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are
+// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more
+// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not
+// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the
+// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
+// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
+// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies
+// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed
+// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key
+// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the
+// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is
+// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data
+// provided in the request. If not specified, encryption is performed with the default account encryption scope. For
+// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set
+// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since
+// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
+// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
+// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a
+// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-// analytics logging is enabled.
-func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
+// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations.
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobUploadResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -429,7 +530,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@@ -441,7 +542,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
}
// uploadPreparer prepares the Upload request.
-func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -451,6 +552,9 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
req.URL.RawQuery = params.Encode()
+ if transactionalContentMD5 != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
+ }
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
@@ -478,6 +582,21 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
+ if tier != AccessTierNone {
+ req.Header.Set("x-ms-access-tier", string(tier))
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -490,10 +609,16 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
req.Header.Set("x-ms-blob-type", "BlockBlob")
return req, nil
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go
index 1b3ea2e4b4..d697e37d7d 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go
@@ -10,7 +10,7 @@ import (
const (
// ServiceVersion specifies the version of the operations used in this package.
- ServiceVersion = "2018-11-09"
+ ServiceVersion = "2019-12-12"
)
// managementClient is the base client for Azblob.
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go
index 599e8118cc..88ff7df311 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go
@@ -259,14 +259,18 @@ func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipe
// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be
// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) {
+// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on
+// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and
+// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the
+// container.
+func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.createPreparer(timeout, metadata, access, requestID)
+ req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride)
if err != nil {
return nil, err
}
@@ -278,7 +282,7 @@ func (client containerClient) Create(ctx context.Context, timeout *int32, metada
}
// createPreparer prepares the Create request.
-func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) {
+func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -301,6 +305,12 @@ func (client containerClient) createPreparer(timeout *int32, metadata map[string
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if defaultEncryptionScope != nil {
+ req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope)
+ }
+ if preventEncryptionScopeOverride != nil {
+ req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride))
+ }
return req, nil
}
@@ -881,6 +891,70 @@ func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipel
return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err
}
+// Restore restores a previously-deleted container.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+// deletedContainerName is optional. Version 2019-12-12 and laster. Specifies the name of the deleted container to
+// restore. deletedContainerVersion is optional. Version 2019-12-12 and laster. Specifies the version of the deleted
+// container to restore.
+func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*ContainerRestoreResponse), err
+}
+
+// restorePreparer prepares the Restore request.
+func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("PUT", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("restype", "container")
+ params.Set("comp", "undelete")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ if deletedContainerName != nil {
+ req.Header.Set("x-ms-deleted-container-name", *deletedContainerName)
+ }
+ if deletedContainerVersion != nil {
+ req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion)
+ }
+ return req, nil
+}
+
+// restoreResponder handles the response to the Restore request.
+func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+ if resp == nil {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, resp.Response().Body)
+ resp.Response().Body.Close()
+ return &ContainerRestoreResponse{rawResponse: resp.Response()}, err
+}
+
// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a
// container may be accessed publicly.
//
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go
index 391584969a..78f467c406 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go
@@ -4,8 +4,6 @@ package azblob
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
- "crypto/hmac"
- "crypto/sha256"
"encoding/base64"
"encoding/xml"
"errors"
@@ -109,6 +107,8 @@ const (
AccessTierNone AccessTierType = ""
// AccessTierP10 ...
AccessTierP10 AccessTierType = "P10"
+ // AccessTierP15 ...
+ AccessTierP15 AccessTierType = "P15"
// AccessTierP20 ...
AccessTierP20 AccessTierType = "P20"
// AccessTierP30 ...
@@ -121,11 +121,17 @@ const (
AccessTierP50 AccessTierType = "P50"
// AccessTierP6 ...
AccessTierP6 AccessTierType = "P6"
+ // AccessTierP60 ...
+ AccessTierP60 AccessTierType = "P60"
+ // AccessTierP70 ...
+ AccessTierP70 AccessTierType = "P70"
+ // AccessTierP80 ...
+ AccessTierP80 AccessTierType = "P80"
)
// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type.
func PossibleAccessTierTypeValues() []AccessTierType {
- return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6}
+ return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80}
}
// AccountKindType enumerates the values for account kind type.
@@ -134,6 +140,10 @@ type AccountKindType string
const (
// AccountKindBlobStorage ...
AccountKindBlobStorage AccountKindType = "BlobStorage"
+ // AccountKindBlockBlobStorage ...
+ AccountKindBlockBlobStorage AccountKindType = "BlockBlobStorage"
+ // AccountKindFileStorage ...
+ AccountKindFileStorage AccountKindType = "FileStorage"
// AccountKindNone represents an empty AccountKindType.
AccountKindNone AccountKindType = ""
// AccountKindStorage ...
@@ -144,7 +154,7 @@ const (
// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type.
func PossibleAccountKindTypeValues() []AccountKindType {
- return []AccountKindType{AccountKindBlobStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2}
+ return []AccountKindType{AccountKindBlobStorage, AccountKindBlockBlobStorage, AccountKindFileStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2}
}
// ArchiveStatusType enumerates the values for archive status type.
@@ -164,6 +174,27 @@ func PossibleArchiveStatusTypeValues() []ArchiveStatusType {
return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot}
}
+// BlobExpiryOptionsType enumerates the values for blob expiry options type.
+type BlobExpiryOptionsType string
+
+const (
+ // BlobExpiryOptionsAbsolute ...
+ BlobExpiryOptionsAbsolute BlobExpiryOptionsType = "Absolute"
+ // BlobExpiryOptionsNeverExpire ...
+ BlobExpiryOptionsNeverExpire BlobExpiryOptionsType = "NeverExpire"
+ // BlobExpiryOptionsNone represents an empty BlobExpiryOptionsType.
+ BlobExpiryOptionsNone BlobExpiryOptionsType = ""
+ // BlobExpiryOptionsRelativeToCreation ...
+ BlobExpiryOptionsRelativeToCreation BlobExpiryOptionsType = "RelativeToCreation"
+ // BlobExpiryOptionsRelativeToNow ...
+ BlobExpiryOptionsRelativeToNow BlobExpiryOptionsType = "RelativeToNow"
+)
+
+// PossibleBlobExpiryOptionsTypeValues returns an array of possible values for the BlobExpiryOptionsType const type.
+func PossibleBlobExpiryOptionsTypeValues() []BlobExpiryOptionsType {
+ return []BlobExpiryOptionsType{BlobExpiryOptionsAbsolute, BlobExpiryOptionsNeverExpire, BlobExpiryOptionsNone, BlobExpiryOptionsRelativeToCreation, BlobExpiryOptionsRelativeToNow}
+}
+
// BlobType enumerates the values for blob type.
type BlobType string
@@ -240,6 +271,21 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly}
}
+// EncryptionAlgorithmType enumerates the values for encryption algorithm type.
+type EncryptionAlgorithmType string
+
+const (
+ // EncryptionAlgorithmAES256 ...
+ EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256"
+ // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType.
+ EncryptionAlgorithmNone EncryptionAlgorithmType = ""
+)
+
+// PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type.
+func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
+ return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone}
+}
+
// GeoReplicationStatusType enumerates the values for geo replication status type.
type GeoReplicationStatusType string
@@ -330,19 +376,25 @@ const (
ListBlobsIncludeItemNone ListBlobsIncludeItemType = ""
// ListBlobsIncludeItemSnapshots ...
ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots"
+ // ListBlobsIncludeItemTags ...
+ ListBlobsIncludeItemTags ListBlobsIncludeItemType = "tags"
// ListBlobsIncludeItemUncommittedblobs ...
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs"
+ // ListBlobsIncludeItemVersions ...
+ ListBlobsIncludeItemVersions ListBlobsIncludeItemType = "versions"
)
// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type.
func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType {
- return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemUncommittedblobs}
+ return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions}
}
// ListContainersIncludeType enumerates the values for list containers include type.
type ListContainersIncludeType string
const (
+ // ListContainersIncludeDeleted ...
+ ListContainersIncludeDeleted ListContainersIncludeType = "deleted"
// ListContainersIncludeMetadata ...
ListContainersIncludeMetadata ListContainersIncludeType = "metadata"
// ListContainersIncludeNone represents an empty ListContainersIncludeType.
@@ -351,7 +403,59 @@ const (
// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type.
func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
- return []ListContainersIncludeType{ListContainersIncludeMetadata, ListContainersIncludeNone}
+ return []ListContainersIncludeType{ListContainersIncludeDeleted, ListContainersIncludeMetadata, ListContainersIncludeNone}
+}
+
+// PathRenameModeType enumerates the values for path rename mode type.
+type PathRenameModeType string
+
+const (
+ // PathRenameModeLegacy ...
+ PathRenameModeLegacy PathRenameModeType = "legacy"
+ // PathRenameModeNone represents an empty PathRenameModeType.
+ PathRenameModeNone PathRenameModeType = ""
+ // PathRenameModePosix ...
+ PathRenameModePosix PathRenameModeType = "posix"
+)
+
+// PossiblePathRenameModeTypeValues returns an array of possible values for the PathRenameModeType const type.
+func PossiblePathRenameModeTypeValues() []PathRenameModeType {
+ return []PathRenameModeType{PathRenameModeLegacy, PathRenameModeNone, PathRenameModePosix}
+}
+
+// PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type.
+type PremiumPageBlobAccessTierType string
+
+const (
+ // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType.
+ PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = ""
+ // PremiumPageBlobAccessTierP10 ...
+ PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10"
+ // PremiumPageBlobAccessTierP15 ...
+ PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15"
+ // PremiumPageBlobAccessTierP20 ...
+ PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20"
+ // PremiumPageBlobAccessTierP30 ...
+ PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30"
+ // PremiumPageBlobAccessTierP4 ...
+ PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4"
+ // PremiumPageBlobAccessTierP40 ...
+ PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40"
+ // PremiumPageBlobAccessTierP50 ...
+ PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50"
+ // PremiumPageBlobAccessTierP6 ...
+ PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6"
+ // PremiumPageBlobAccessTierP60 ...
+ PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60"
+ // PremiumPageBlobAccessTierP70 ...
+ PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70"
+ // PremiumPageBlobAccessTierP80 ...
+ PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80"
+)
+
+// PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type.
+func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType {
+ return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80}
}
// PublicAccessType enumerates the values for public access type.
@@ -371,6 +475,40 @@ func PossiblePublicAccessTypeValues() []PublicAccessType {
return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone}
}
+// QueryFormatType enumerates the values for query format type.
+type QueryFormatType string
+
+const (
+ // QueryFormatDelimited ...
+ QueryFormatDelimited QueryFormatType = "delimited"
+ // QueryFormatJSON ...
+ QueryFormatJSON QueryFormatType = "json"
+ // QueryFormatNone represents an empty QueryFormatType.
+ QueryFormatNone QueryFormatType = ""
+)
+
+// PossibleQueryFormatTypeValues returns an array of possible values for the QueryFormatType const type.
+func PossibleQueryFormatTypeValues() []QueryFormatType {
+ return []QueryFormatType{QueryFormatDelimited, QueryFormatJSON, QueryFormatNone}
+}
+
+// RehydratePriorityType enumerates the values for rehydrate priority type.
+type RehydratePriorityType string
+
+const (
+ // RehydratePriorityHigh ...
+ RehydratePriorityHigh RehydratePriorityType = "High"
+ // RehydratePriorityNone represents an empty RehydratePriorityType.
+ RehydratePriorityNone RehydratePriorityType = ""
+ // RehydratePriorityStandard ...
+ RehydratePriorityStandard RehydratePriorityType = "Standard"
+)
+
+// PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type.
+func PossibleRehydratePriorityTypeValues() []RehydratePriorityType {
+ return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard}
+}
+
// SequenceNumberActionType enumerates the values for sequence number action type.
type SequenceNumberActionType string
@@ -429,6 +567,16 @@ const (
StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed"
// StorageErrorCodeAuthorizationFailure ...
StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure"
+ // StorageErrorCodeAuthorizationPermissionMismatch ...
+ StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch"
+ // StorageErrorCodeAuthorizationProtocolMismatch ...
+ StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch"
+ // StorageErrorCodeAuthorizationResourceTypeMismatch ...
+ StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch"
+ // StorageErrorCodeAuthorizationServiceMismatch ...
+ StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch"
+ // StorageErrorCodeAuthorizationSourceIPMismatch ...
+ StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch"
// StorageErrorCodeBlobAlreadyExists ...
StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists"
// StorageErrorCodeBlobArchived ...
@@ -571,6 +719,8 @@ const (
StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode"
// StorageErrorCodeMultipleConditionHeadersNotSupported ...
StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported"
+ // StorageErrorCodeNoAuthenticationInformation ...
+ StorageErrorCodeNoAuthenticationInformation StorageErrorCodeType = "NoAuthenticationInformation"
// StorageErrorCodeNone represents an empty StorageErrorCodeType.
StorageErrorCodeNone StorageErrorCodeType = ""
// StorageErrorCodeNoPendingCopyOperation ...
@@ -633,7 +783,7 @@ const (
// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type.
func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType {
- return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, 
StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, 
StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode}
+ return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, 
StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNoAuthenticationInformation, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, 
StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode}
}
// SyncCopyStatusType enumerates the values for sync copy status type.
@@ -654,11 +804,11 @@ func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType {
// AccessPolicy - An Access policy
type AccessPolicy struct {
// Start - the date-time the policy is active
- Start time.Time `xml:"Start"`
+ Start *time.Time `xml:"Start"`
// Expiry - the date-time the policy expires
- Expiry time.Time `xml:"Expiry"`
+ Expiry *time.Time `xml:"Expiry"`
// Permission - the permissions for the acl policy
- Permission string `xml:"Permission"`
+ Permission *string `xml:"Permission"`
}
// MarshalXML implements the xml.Marshaler interface for AccessPolicy.
@@ -737,6 +887,16 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string {
+ return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionScope() string {
+ return ababfur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string {
return ababfur.rawResponse.Header.Get("x-ms-error-code")
@@ -747,6 +907,11 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag {
return ETag(ababfur.rawResponse.Header.Get("ETag"))
}
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string {
+ return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
// LastModified returns the value for header Last-Modified.
func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time {
s := ababfur.rawResponse.Header.Get("Last-Modified")
@@ -770,6 +935,19 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string {
return ababfur.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte {
+ s := ababfur.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// AppendBlobAppendBlockResponse ...
type AppendBlobAppendBlockResponse struct {
rawResponse *http.Response
@@ -808,6 +986,11 @@ func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 {
return int32(i)
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string {
+ return ababr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte {
s := ababr.rawResponse.Header.Get("Content-MD5")
@@ -834,6 +1017,16 @@ func (ababr AppendBlobAppendBlockResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string {
+ return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (ababr AppendBlobAppendBlockResponse) EncryptionScope() string {
+ return ababr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (ababr AppendBlobAppendBlockResponse) ErrorCode() string {
return ababr.rawResponse.Header.Get("x-ms-error-code")
@@ -872,6 +1065,19 @@ func (ababr AppendBlobAppendBlockResponse) Version() string {
return ababr.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte {
+ s := ababr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// AppendBlobCreateResponse ...
type AppendBlobCreateResponse struct {
rawResponse *http.Response
@@ -892,6 +1098,11 @@ func (abcr AppendBlobCreateResponse) Status() string {
return abcr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (abcr AppendBlobCreateResponse) ClientRequestID() string {
+ return abcr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (abcr AppendBlobCreateResponse) ContentMD5() []byte {
s := abcr.rawResponse.Header.Get("Content-MD5")
@@ -918,6 +1129,16 @@ func (abcr AppendBlobCreateResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (abcr AppendBlobCreateResponse) EncryptionKeySha256() string {
+ return abcr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (abcr AppendBlobCreateResponse) EncryptionScope() string {
+ return abcr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (abcr AppendBlobCreateResponse) ErrorCode() string {
return abcr.rawResponse.Header.Get("x-ms-error-code")
@@ -956,6 +1177,87 @@ func (abcr AppendBlobCreateResponse) Version() string {
return abcr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (abcr AppendBlobCreateResponse) VersionID() string {
+ return abcr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// AppendBlobSealResponse ...
+type AppendBlobSealResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (absr AppendBlobSealResponse) Response() *http.Response {
+ return absr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (absr AppendBlobSealResponse) StatusCode() int {
+ return absr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (absr AppendBlobSealResponse) Status() string {
+ return absr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (absr AppendBlobSealResponse) ClientRequestID() string {
+ return absr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (absr AppendBlobSealResponse) Date() time.Time {
+ s := absr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (absr AppendBlobSealResponse) ErrorCode() string {
+ return absr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (absr AppendBlobSealResponse) ETag() ETag {
+ return ETag(absr.rawResponse.Header.Get("ETag"))
+}
+
+// IsSealed returns the value for header x-ms-blob-sealed.
+func (absr AppendBlobSealResponse) IsSealed() string {
+ return absr.rawResponse.Header.Get("x-ms-blob-sealed")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (absr AppendBlobSealResponse) LastModified() time.Time {
+ s := absr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (absr AppendBlobSealResponse) RequestID() string {
+ return absr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (absr AppendBlobSealResponse) Version() string {
+ return absr.rawResponse.Header.Get("x-ms-version")
+}
+
// BlobAbortCopyFromURLResponse ...
type BlobAbortCopyFromURLResponse struct {
rawResponse *http.Response
@@ -976,6 +1278,11 @@ func (bacfur BlobAbortCopyFromURLResponse) Status() string {
return bacfur.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string {
+ return bacfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time {
s := bacfur.rawResponse.Header.Get("Date")
@@ -1024,6 +1331,11 @@ func (balr BlobAcquireLeaseResponse) Status() string {
return balr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (balr BlobAcquireLeaseResponse) ClientRequestID() string {
+ return balr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (balr BlobAcquireLeaseResponse) Date() time.Time {
s := balr.rawResponse.Header.Get("Date")
@@ -1095,6 +1407,11 @@ func (bblr BlobBreakLeaseResponse) Status() string {
return bblr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bblr BlobBreakLeaseResponse) ClientRequestID() string {
+ return bblr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bblr BlobBreakLeaseResponse) Date() time.Time {
s := bblr.rawResponse.Header.Get("Date")
@@ -1174,6 +1491,11 @@ func (bclr BlobChangeLeaseResponse) Status() string {
return bclr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bclr BlobChangeLeaseResponse) ClientRequestID() string {
+ return bclr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bclr BlobChangeLeaseResponse) Date() time.Time {
s := bclr.rawResponse.Header.Get("Date")
@@ -1245,6 +1567,24 @@ func (bcfur BlobCopyFromURLResponse) Status() string {
return bcfur.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bcfur BlobCopyFromURLResponse) ClientRequestID() string {
+ return bcfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte {
+ s := bcfur.rawResponse.Header.Get("Content-MD5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// CopyID returns the value for header x-ms-copy-id.
func (bcfur BlobCopyFromURLResponse) CopyID() string {
return bcfur.rawResponse.Header.Get("x-ms-copy-id")
@@ -1301,6 +1641,24 @@ func (bcfur BlobCopyFromURLResponse) Version() string {
return bcfur.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (bcfur BlobCopyFromURLResponse) VersionID() string {
+ return bcfur.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte {
+ s := bcfur.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// BlobCreateSnapshotResponse ...
type BlobCreateSnapshotResponse struct {
rawResponse *http.Response
@@ -1321,6 +1679,11 @@ func (bcsr BlobCreateSnapshotResponse) Status() string {
return bcsr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string {
+ return bcsr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bcsr BlobCreateSnapshotResponse) Date() time.Time {
s := bcsr.rawResponse.Header.Get("Date")
@@ -1344,6 +1707,11 @@ func (bcsr BlobCreateSnapshotResponse) ETag() ETag {
return ETag(bcsr.rawResponse.Header.Get("ETag"))
}
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string {
+ return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
// LastModified returns the value for header Last-Modified.
func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time {
s := bcsr.rawResponse.Header.Get("Last-Modified")
@@ -1372,6 +1740,11 @@ func (bcsr BlobCreateSnapshotResponse) Version() string {
return bcsr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (bcsr BlobCreateSnapshotResponse) VersionID() string {
+ return bcsr.rawResponse.Header.Get("x-ms-version-id")
+}
+
// BlobDeleteResponse ...
type BlobDeleteResponse struct {
rawResponse *http.Response
@@ -1392,6 +1765,11 @@ func (bdr BlobDeleteResponse) Status() string {
return bdr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bdr BlobDeleteResponse) ClientRequestID() string {
+ return bdr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bdr BlobDeleteResponse) Date() time.Time {
s := bdr.rawResponse.Header.Get("Date")
@@ -1423,8 +1801,94 @@ func (bdr BlobDeleteResponse) Version() string {
// BlobFlatListSegment ...
type BlobFlatListSegment struct {
// XMLName is used for marshalling and is subject to removal in a future release.
- XMLName xml.Name `xml:"Blobs"`
- BlobItems []BlobItem `xml:"Blob"`
+ XMLName xml.Name `xml:"Blobs"`
+ BlobItems []BlobItemInternal `xml:"Blob"`
+}
+
+// BlobGetAccessControlResponse ...
+type BlobGetAccessControlResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bgacr BlobGetAccessControlResponse) Response() *http.Response {
+ return bgacr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bgacr BlobGetAccessControlResponse) StatusCode() int {
+ return bgacr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bgacr BlobGetAccessControlResponse) Status() string {
+ return bgacr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bgacr BlobGetAccessControlResponse) ClientRequestID() string {
+ return bgacr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bgacr BlobGetAccessControlResponse) Date() time.Time {
+ s := bgacr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ETag returns the value for header ETag.
+func (bgacr BlobGetAccessControlResponse) ETag() ETag {
+ return ETag(bgacr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bgacr BlobGetAccessControlResponse) LastModified() time.Time {
+ s := bgacr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bgacr BlobGetAccessControlResponse) RequestID() string {
+ return bgacr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bgacr BlobGetAccessControlResponse) Version() string {
+ return bgacr.rawResponse.Header.Get("x-ms-version")
+}
+
+// XMsACL returns the value for header x-ms-acl.
+func (bgacr BlobGetAccessControlResponse) XMsACL() string {
+ return bgacr.rawResponse.Header.Get("x-ms-acl")
+}
+
+// XMsGroup returns the value for header x-ms-group.
+func (bgacr BlobGetAccessControlResponse) XMsGroup() string {
+ return bgacr.rawResponse.Header.Get("x-ms-group")
+}
+
+// XMsOwner returns the value for header x-ms-owner.
+func (bgacr BlobGetAccessControlResponse) XMsOwner() string {
+ return bgacr.rawResponse.Header.Get("x-ms-owner")
+}
+
+// XMsPermissions returns the value for header x-ms-permissions.
+func (bgacr BlobGetAccessControlResponse) XMsPermissions() string {
+ return bgacr.rawResponse.Header.Get("x-ms-permissions")
}
// BlobGetAccountInfoResponse ...
@@ -1452,6 +1916,11 @@ func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType {
return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind"))
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bgair BlobGetAccountInfoResponse) ClientRequestID() string {
+ return bgair.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bgair BlobGetAccountInfoResponse) Date() time.Time {
s := bgair.rawResponse.Header.Get("Date")
@@ -1587,6 +2056,11 @@ func (bgpr BlobGetPropertiesResponse) CacheControl() string {
return bgpr.rawResponse.Header.Get("Cache-Control")
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bgpr BlobGetPropertiesResponse) ClientRequestID() string {
+ return bgpr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentDisposition returns the value for header Content-Disposition.
func (bgpr BlobGetPropertiesResponse) ContentDisposition() string {
return bgpr.rawResponse.Header.Get("Content-Disposition")
@@ -1702,6 +2176,16 @@ func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string {
return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot")
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string {
+ return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bgpr BlobGetPropertiesResponse) EncryptionScope() string {
+ return bgpr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bgpr BlobGetPropertiesResponse) ErrorCode() string {
return bgpr.rawResponse.Header.Get("x-ms-error-code")
@@ -1712,11 +2196,34 @@ func (bgpr BlobGetPropertiesResponse) ETag() ETag {
return ETag(bgpr.rawResponse.Header.Get("ETag"))
}
+// ExpiresOn returns the value for header x-ms-expiry-time.
+func (bgpr BlobGetPropertiesResponse) ExpiresOn() time.Time {
+ s := bgpr.rawResponse.Header.Get("x-ms-expiry-time")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// IsCurrentVersion returns the value for header x-ms-is-current-version.
+func (bgpr BlobGetPropertiesResponse) IsCurrentVersion() string {
+ return bgpr.rawResponse.Header.Get("x-ms-is-current-version")
+}
+
// IsIncrementalCopy returns the value for header x-ms-incremental-copy.
func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string {
return bgpr.rawResponse.Header.Get("x-ms-incremental-copy")
}
+// IsSealed returns the value for header x-ms-blob-sealed.
+func (bgpr BlobGetPropertiesResponse) IsSealed() string {
+ return bgpr.rawResponse.Header.Get("x-ms-blob-sealed")
+}
+
// IsServerEncrypted returns the value for header x-ms-server-encrypted.
func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string {
return bgpr.rawResponse.Header.Get("x-ms-server-encrypted")
@@ -1750,33 +2257,81 @@ func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType {
return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status"))
}
-// RequestID returns the value for header x-ms-request-id.
-func (bgpr BlobGetPropertiesResponse) RequestID() string {
- return bgpr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (bgpr BlobGetPropertiesResponse) Version() string {
- return bgpr.rawResponse.Header.Get("x-ms-version")
+// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id.
+func (bgpr BlobGetPropertiesResponse) ObjectReplicationPolicyID() string {
+ return bgpr.rawResponse.Header.Get("x-ms-or-policy-id")
}
-// BlobHierarchyListSegment ...
-type BlobHierarchyListSegment struct {
- // XMLName is used for marshalling and is subject to removal in a future release.
- XMLName xml.Name `xml:"Blobs"`
- BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
- BlobItems []BlobItem `xml:"Blob"`
+// ObjectReplicationRules returns the value for header x-ms-or.
+func (bgpr BlobGetPropertiesResponse) ObjectReplicationRules() string {
+ return bgpr.rawResponse.Header.Get("x-ms-or")
}
-// BlobItem - An Azure Storage blob
-type BlobItem struct {
- // XMLName is used for marshalling and is subject to removal in a future release.
- XMLName xml.Name `xml:"Blob"`
- Name string `xml:"Name"`
- Deleted bool `xml:"Deleted"`
- Snapshot string `xml:"Snapshot"`
- Properties BlobProperties `xml:"Properties"`
- Metadata Metadata `xml:"Metadata"`
+// RehydratePriority returns the value for header x-ms-rehydrate-priority.
+func (bgpr BlobGetPropertiesResponse) RehydratePriority() string {
+ return bgpr.rawResponse.Header.Get("x-ms-rehydrate-priority")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bgpr BlobGetPropertiesResponse) RequestID() string {
+ return bgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// TagCount returns the value for header x-ms-tag-count.
+func (bgpr BlobGetPropertiesResponse) TagCount() int64 {
+ s := bgpr.rawResponse.Header.Get("x-ms-tag-count")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// Version returns the value for header x-ms-version.
+func (bgpr BlobGetPropertiesResponse) Version() string {
+ return bgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bgpr BlobGetPropertiesResponse) VersionID() string {
+ return bgpr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobHierarchyListSegment ...
+type BlobHierarchyListSegment struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Blobs"`
+ BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
+ BlobItems []BlobItemInternal `xml:"Blob"`
+}
+
+// BlobItemInternal - An Azure Storage blob
+type BlobItemInternal struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Blob"`
+ Name string `xml:"Name"`
+ Deleted bool `xml:"Deleted"`
+ Snapshot string `xml:"Snapshot"`
+ VersionID *string `xml:"VersionId"`
+ IsCurrentVersion *bool `xml:"IsCurrentVersion"`
+ Properties BlobProperties `xml:"Properties"`
+
+ // TODO funky generator type -> *BlobMetadata
+ Metadata Metadata `xml:"Metadata"`
+ BlobTags *BlobTags `xml:"Tags"`
+ ObjectReplicationMetadata map[string]string `xml:"ObjectReplicationMetadata"`
+}
+
+// BlobMetadata ...
+type BlobMetadata struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Metadata"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized into this collection
+ AdditionalProperties map[string]string `xml:"AdditionalProperties"`
+ Encrypted *string `xml:"Encrypted,attr"`
}
// BlobPrefix ...
@@ -1820,24 +2375,32 @@ type BlobProperties struct {
DestinationSnapshot *string `xml:"DestinationSnapshot"`
DeletedTime *time.Time `xml:"DeletedTime"`
RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
- // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
+ // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
AccessTier AccessTierType `xml:"AccessTier"`
AccessTierInferred *bool `xml:"AccessTierInferred"`
// ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone'
- ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
- AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
+ ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
+ CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"`
+ // EncryptionScope - The name of the encryption scope under which the blob is encrypted.
+ EncryptionScope *string `xml:"EncryptionScope"`
+ AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
+ TagCount *int32 `xml:"TagCount"`
+ ExpiresOn *time.Time `xml:"Expiry-Time"`
+ IsSealed *bool `xml:"IsSealed"`
+ // RehydratePriority - Possible values include: 'RehydratePriorityHigh', 'RehydratePriorityStandard', 'RehydratePriorityNone'
+ RehydratePriority RehydratePriorityType `xml:"RehydratePriority"`
}
// MarshalXML implements the xml.Marshaler interface for BlobProperties.
-func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
- bp2 := (*blobProperties)(unsafe.Pointer(&bp))
- return e.EncodeElement(*bp2, start)
+func (bpi BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ bpi2 := (*blobProperties)(unsafe.Pointer(&bpi))
+ return e.EncodeElement(*bpi2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties.
-func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
- bp2 := (*blobProperties)(unsafe.Pointer(bp))
- return d.DecodeElement(bp2, &start)
+func (bpi *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ bpi2 := (*blobProperties)(unsafe.Pointer(bpi))
+ return d.DecodeElement(bpi2, &start)
}
// BlobReleaseLeaseResponse ...
@@ -1860,6 +2423,11 @@ func (brlr BlobReleaseLeaseResponse) Status() string {
return brlr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (brlr BlobReleaseLeaseResponse) ClientRequestID() string {
+ return brlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (brlr BlobReleaseLeaseResponse) Date() time.Time {
s := brlr.rawResponse.Header.Get("Date")
@@ -1906,6 +2474,85 @@ func (brlr BlobReleaseLeaseResponse) Version() string {
return brlr.rawResponse.Header.Get("x-ms-version")
}
+// BlobRenameResponse ...
+type BlobRenameResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (brr BlobRenameResponse) Response() *http.Response {
+ return brr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (brr BlobRenameResponse) StatusCode() int {
+ return brr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (brr BlobRenameResponse) Status() string {
+ return brr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (brr BlobRenameResponse) ClientRequestID() string {
+ return brr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (brr BlobRenameResponse) ContentLength() int64 {
+ s := brr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// Date returns the value for header Date.
+func (brr BlobRenameResponse) Date() time.Time {
+ s := brr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ETag returns the value for header ETag.
+func (brr BlobRenameResponse) ETag() ETag {
+ return ETag(brr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (brr BlobRenameResponse) LastModified() time.Time {
+ s := brr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (brr BlobRenameResponse) RequestID() string {
+ return brr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (brr BlobRenameResponse) Version() string {
+ return brr.rawResponse.Header.Get("x-ms-version")
+}
+
// BlobRenewLeaseResponse ...
type BlobRenewLeaseResponse struct {
rawResponse *http.Response
@@ -1926,6 +2573,11 @@ func (brlr BlobRenewLeaseResponse) Status() string {
return brlr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (brlr BlobRenewLeaseResponse) ClientRequestID() string {
+ return brlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (brlr BlobRenewLeaseResponse) Date() time.Time {
s := brlr.rawResponse.Header.Get("Date")
@@ -1977,6 +2629,143 @@ func (brlr BlobRenewLeaseResponse) Version() string {
return brlr.rawResponse.Header.Get("x-ms-version")
}
+// BlobSetAccessControlResponse ...
+type BlobSetAccessControlResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bsacr BlobSetAccessControlResponse) Response() *http.Response {
+ return bsacr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bsacr BlobSetAccessControlResponse) StatusCode() int {
+ return bsacr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bsacr BlobSetAccessControlResponse) Status() string {
+ return bsacr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bsacr BlobSetAccessControlResponse) ClientRequestID() string {
+ return bsacr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bsacr BlobSetAccessControlResponse) Date() time.Time {
+ s := bsacr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ETag returns the value for header ETag.
+func (bsacr BlobSetAccessControlResponse) ETag() ETag {
+ return ETag(bsacr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bsacr BlobSetAccessControlResponse) LastModified() time.Time {
+ s := bsacr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bsacr BlobSetAccessControlResponse) RequestID() string {
+ return bsacr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bsacr BlobSetAccessControlResponse) Version() string {
+ return bsacr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetExpiryResponse ...
+type BlobSetExpiryResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bser BlobSetExpiryResponse) Response() *http.Response {
+ return bser.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bser BlobSetExpiryResponse) StatusCode() int {
+ return bser.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bser BlobSetExpiryResponse) Status() string {
+ return bser.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bser BlobSetExpiryResponse) ClientRequestID() string {
+ return bser.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bser BlobSetExpiryResponse) Date() time.Time {
+ s := bser.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bser BlobSetExpiryResponse) ErrorCode() string {
+ return bser.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bser BlobSetExpiryResponse) ETag() ETag {
+ return ETag(bser.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bser BlobSetExpiryResponse) LastModified() time.Time {
+ s := bser.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bser BlobSetExpiryResponse) RequestID() string {
+ return bser.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bser BlobSetExpiryResponse) Version() string {
+ return bser.rawResponse.Header.Get("x-ms-version")
+}
+
// BlobSetHTTPHeadersResponse ...
type BlobSetHTTPHeadersResponse struct {
rawResponse *http.Response
@@ -2010,6 +2799,11 @@ func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string {
+ return bshhr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time {
s := bshhr.rawResponse.Header.Get("Date")
@@ -2076,6 +2870,11 @@ func (bsmr BlobSetMetadataResponse) Status() string {
return bsmr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bsmr BlobSetMetadataResponse) ClientRequestID() string {
+ return bsmr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (bsmr BlobSetMetadataResponse) Date() time.Time {
s := bsmr.rawResponse.Header.Get("Date")
@@ -2089,6 +2888,16 @@ func (bsmr BlobSetMetadataResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string {
+ return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bsmr BlobSetMetadataResponse) EncryptionScope() string {
+ return bsmr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bsmr BlobSetMetadataResponse) ErrorCode() string {
return bsmr.rawResponse.Header.Get("x-ms-error-code")
@@ -2127,6 +2936,64 @@ func (bsmr BlobSetMetadataResponse) Version() string {
return bsmr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (bsmr BlobSetMetadataResponse) VersionID() string {
+ return bsmr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobSetTagsResponse ...
+type BlobSetTagsResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bstr BlobSetTagsResponse) Response() *http.Response {
+ return bstr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bstr BlobSetTagsResponse) StatusCode() int {
+ return bstr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bstr BlobSetTagsResponse) Status() string {
+ return bstr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bstr BlobSetTagsResponse) ClientRequestID() string {
+ return bstr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bstr BlobSetTagsResponse) Date() time.Time {
+ s := bstr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bstr BlobSetTagsResponse) ErrorCode() string {
+ return bstr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bstr BlobSetTagsResponse) RequestID() string {
+ return bstr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bstr BlobSetTagsResponse) Version() string {
+ return bstr.rawResponse.Header.Get("x-ms-version")
+}
+
// BlobSetTierResponse ...
type BlobSetTierResponse struct {
rawResponse *http.Response
@@ -2147,6 +3014,11 @@ func (bstr BlobSetTierResponse) Status() string {
return bstr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bstr BlobSetTierResponse) ClientRequestID() string {
+ return bstr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bstr BlobSetTierResponse) ErrorCode() string {
return bstr.rawResponse.Header.Get("x-ms-error-code")
@@ -2182,6 +3054,11 @@ func (bscfur BlobStartCopyFromURLResponse) Status() string {
return bscfur.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string {
+ return bscfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// CopyID returns the value for header x-ms-copy-id.
func (bscfur BlobStartCopyFromURLResponse) CopyID() string {
return bscfur.rawResponse.Header.Get("x-ms-copy-id")
@@ -2238,29 +3115,103 @@ func (bscfur BlobStartCopyFromURLResponse) Version() string {
return bscfur.rawResponse.Header.Get("x-ms-version")
}
-// BlobUndeleteResponse ...
-type BlobUndeleteResponse struct {
+// VersionID returns the value for header x-ms-version-id.
+func (bscfur BlobStartCopyFromURLResponse) VersionID() string {
+ return bscfur.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobTag ...
+type BlobTag struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Tag"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+// BlobTags - Blob tags
+type BlobTags struct {
rawResponse *http.Response
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Tags"`
+ BlobTagSet []BlobTag `xml:"TagSet>Tag"`
}
// Response returns the raw HTTP response object.
-func (bur BlobUndeleteResponse) Response() *http.Response {
- return bur.rawResponse
+func (bt BlobTags) Response() *http.Response {
+ return bt.rawResponse
}
// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (bur BlobUndeleteResponse) StatusCode() int {
- return bur.rawResponse.StatusCode
+func (bt BlobTags) StatusCode() int {
+ return bt.rawResponse.StatusCode
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (bur BlobUndeleteResponse) Status() string {
- return bur.rawResponse.Status
+func (bt BlobTags) Status() string {
+ return bt.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bt BlobTags) ClientRequestID() string {
+ return bt.rawResponse.Header.Get("x-ms-client-request-id")
}
// Date returns the value for header Date.
-func (bur BlobUndeleteResponse) Date() time.Time {
- s := bur.rawResponse.Header.Get("Date")
+func (bt BlobTags) Date() time.Time {
+ s := bt.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bt BlobTags) ErrorCode() string {
+ return bt.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bt BlobTags) RequestID() string {
+ return bt.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bt BlobTags) Version() string {
+ return bt.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobUndeleteResponse ...
+type BlobUndeleteResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bur BlobUndeleteResponse) Response() *http.Response {
+ return bur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bur BlobUndeleteResponse) StatusCode() int {
+ return bur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bur BlobUndeleteResponse) Status() string {
+ return bur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bur BlobUndeleteResponse) ClientRequestID() string {
+ return bur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bur BlobUndeleteResponse) Date() time.Time {
+ s := bur.rawResponse.Header.Get("Date")
if s == "" {
return time.Time{}
}
@@ -2291,7 +3242,7 @@ type Block struct {
// Name - The base64 encoded block ID.
Name string `xml:"Name"`
// Size - The block size in bytes.
- Size int32 `xml:"Size"`
+ Size int64 `xml:"Size"`
}
// BlockBlobCommitBlockListResponse ...
@@ -2314,6 +3265,11 @@ func (bbcblr BlockBlobCommitBlockListResponse) Status() string {
return bbcblr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string {
+ return bbcblr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte {
s := bbcblr.rawResponse.Header.Get("Content-MD5")
@@ -2340,6 +3296,16 @@ func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string {
+ return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbcblr BlockBlobCommitBlockListResponse) EncryptionScope() string {
+ return bbcblr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string {
return bbcblr.rawResponse.Header.Get("x-ms-error-code")
@@ -2378,6 +3344,24 @@ func (bbcblr BlockBlobCommitBlockListResponse) Version() string {
return bbcblr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (bbcblr BlockBlobCommitBlockListResponse) VersionID() string {
+ return bbcblr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte {
+ s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// BlockBlobStageBlockFromURLResponse ...
type BlockBlobStageBlockFromURLResponse struct {
rawResponse *http.Response
@@ -2398,6 +3382,11 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string {
return bbsbfur.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string {
+ return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte {
s := bbsbfur.rawResponse.Header.Get("Content-MD5")
@@ -2424,6 +3413,16 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string {
+ return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionScope() string {
+ return bbsbfur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string {
return bbsbfur.rawResponse.Header.Get("x-ms-error-code")
@@ -2444,6 +3443,19 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string {
return bbsbfur.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte {
+ s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// BlockBlobStageBlockResponse ...
type BlockBlobStageBlockResponse struct {
rawResponse *http.Response
@@ -2464,6 +3476,11 @@ func (bbsbr BlockBlobStageBlockResponse) Status() string {
return bbsbr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string {
+ return bbsbr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte {
s := bbsbr.rawResponse.Header.Get("Content-MD5")
@@ -2490,6 +3507,16 @@ func (bbsbr BlockBlobStageBlockResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string {
+ return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbsbr BlockBlobStageBlockResponse) EncryptionScope() string {
+ return bbsbr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string {
return bbsbr.rawResponse.Header.Get("x-ms-error-code")
@@ -2510,6 +3537,19 @@ func (bbsbr BlockBlobStageBlockResponse) Version() string {
return bbsbr.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte {
+ s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// BlockBlobUploadResponse ...
type BlockBlobUploadResponse struct {
rawResponse *http.Response
@@ -2530,6 +3570,11 @@ func (bbur BlockBlobUploadResponse) Status() string {
return bbur.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbur BlockBlobUploadResponse) ClientRequestID() string {
+ return bbur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (bbur BlockBlobUploadResponse) ContentMD5() []byte {
s := bbur.rawResponse.Header.Get("Content-MD5")
@@ -2556,6 +3601,16 @@ func (bbur BlockBlobUploadResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string {
+ return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbur BlockBlobUploadResponse) EncryptionScope() string {
+ return bbur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (bbur BlockBlobUploadResponse) ErrorCode() string {
return bbur.rawResponse.Header.Get("x-ms-error-code")
@@ -2594,6 +3649,11 @@ func (bbur BlockBlobUploadResponse) Version() string {
return bbur.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (bbur BlockBlobUploadResponse) VersionID() string {
+ return bbur.rawResponse.Header.Get("x-ms-version-id")
+}
+
// BlockList ...
type BlockList struct {
rawResponse *http.Response
@@ -2629,6 +3689,11 @@ func (bl BlockList) BlobContentLength() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bl BlockList) ClientRequestID() string {
+ return bl.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentType returns the value for header Content-Type.
func (bl BlockList) ContentType() string {
return bl.rawResponse.Header.Get("Content-Type")
@@ -2715,6 +3780,11 @@ func (calr ContainerAcquireLeaseResponse) Status() string {
return calr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (calr ContainerAcquireLeaseResponse) ClientRequestID() string {
+ return calr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (calr ContainerAcquireLeaseResponse) Date() time.Time {
s := calr.rawResponse.Header.Get("Date")
@@ -2786,6 +3856,11 @@ func (cblr ContainerBreakLeaseResponse) Status() string {
return cblr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (cblr ContainerBreakLeaseResponse) ClientRequestID() string {
+ return cblr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (cblr ContainerBreakLeaseResponse) Date() time.Time {
s := cblr.rawResponse.Header.Get("Date")
@@ -2865,6 +3940,11 @@ func (cclr ContainerChangeLeaseResponse) Status() string {
return cclr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (cclr ContainerChangeLeaseResponse) ClientRequestID() string {
+ return cclr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (cclr ContainerChangeLeaseResponse) Date() time.Time {
s := cclr.rawResponse.Header.Get("Date")
@@ -2936,6 +4016,11 @@ func (ccr ContainerCreateResponse) Status() string {
return ccr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (ccr ContainerCreateResponse) ClientRequestID() string {
+ return ccr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (ccr ContainerCreateResponse) Date() time.Time {
s := ccr.rawResponse.Header.Get("Date")
@@ -3002,6 +4087,11 @@ func (cdr ContainerDeleteResponse) Status() string {
return cdr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (cdr ContainerDeleteResponse) ClientRequestID() string {
+ return cdr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (cdr ContainerDeleteResponse) Date() time.Time {
s := cdr.rawResponse.Header.Get("Date")
@@ -3055,6 +4145,11 @@ func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType {
return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind"))
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (cgair ContainerGetAccountInfoResponse) ClientRequestID() string {
+ return cgair.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (cgair ContainerGetAccountInfoResponse) Date() time.Time {
s := cgair.rawResponse.Header.Get("Date")
@@ -3126,6 +4221,11 @@ func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType {
return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access"))
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (cgpr ContainerGetPropertiesResponse) ClientRequestID() string {
+ return cgpr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (cgpr ContainerGetPropertiesResponse) Date() time.Time {
s := cgpr.rawResponse.Header.Get("Date")
@@ -3139,6 +4239,16 @@ func (cgpr ContainerGetPropertiesResponse) Date() time.Time {
return t
}
+// DefaultEncryptionScope returns the value for header x-ms-default-encryption-scope.
+func (cgpr ContainerGetPropertiesResponse) DefaultEncryptionScope() string {
+ return cgpr.rawResponse.Header.Get("x-ms-default-encryption-scope")
+}
+
+// DenyEncryptionScopeOverride returns the value for header x-ms-deny-encryption-scope-override.
+func (cgpr ContainerGetPropertiesResponse) DenyEncryptionScopeOverride() string {
+ return cgpr.rawResponse.Header.Get("x-ms-deny-encryption-scope-override")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (cgpr ContainerGetPropertiesResponse) ErrorCode() string {
return cgpr.rawResponse.Header.Get("x-ms-error-code")
@@ -3202,6 +4312,8 @@ type ContainerItem struct {
// XMLName is used for marshalling and is subject to removal in a future release.
XMLName xml.Name `xml:"Container"`
Name string `xml:"Name"`
+ Deleted *bool `xml:"Deleted"`
+ Version *string `xml:"Version"`
Properties ContainerProperties `xml:"Properties"`
Metadata Metadata `xml:"Metadata"`
}
@@ -3217,9 +4329,13 @@ type ContainerProperties struct {
// LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone'
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
// PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone'
- PublicAccess PublicAccessType `xml:"PublicAccess"`
- HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
- HasLegalHold *bool `xml:"HasLegalHold"`
+ PublicAccess PublicAccessType `xml:"PublicAccess"`
+ HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
+ HasLegalHold *bool `xml:"HasLegalHold"`
+ DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
+ PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
+ DeletedTime *time.Time `xml:"DeletedTime"`
+ RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
}
// MarshalXML implements the xml.Marshaler interface for ContainerProperties.
@@ -3254,6 +4370,11 @@ func (crlr ContainerReleaseLeaseResponse) Status() string {
return crlr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (crlr ContainerReleaseLeaseResponse) ClientRequestID() string {
+ return crlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (crlr ContainerReleaseLeaseResponse) Date() time.Time {
s := crlr.rawResponse.Header.Get("Date")
@@ -3277,9 +4398,454 @@ func (crlr ContainerReleaseLeaseResponse) ETag() ETag {
return ETag(crlr.rawResponse.Header.Get("ETag"))
}
-// LastModified returns the value for header Last-Modified.
-func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time {
- s := crlr.rawResponse.Header.Get("Last-Modified")
+// LastModified returns the value for header Last-Modified.
+func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time {
+ s := crlr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (crlr ContainerReleaseLeaseResponse) RequestID() string {
+ return crlr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (crlr ContainerReleaseLeaseResponse) Version() string {
+ return crlr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ContainerRenewLeaseResponse ...
+type ContainerRenewLeaseResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (crlr ContainerRenewLeaseResponse) Response() *http.Response {
+ return crlr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (crlr ContainerRenewLeaseResponse) StatusCode() int {
+ return crlr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (crlr ContainerRenewLeaseResponse) Status() string {
+ return crlr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (crlr ContainerRenewLeaseResponse) ClientRequestID() string {
+ return crlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (crlr ContainerRenewLeaseResponse) Date() time.Time {
+ s := crlr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (crlr ContainerRenewLeaseResponse) ErrorCode() string {
+ return crlr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (crlr ContainerRenewLeaseResponse) ETag() ETag {
+ return ETag(crlr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (crlr ContainerRenewLeaseResponse) LastModified() time.Time {
+ s := crlr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// LeaseID returns the value for header x-ms-lease-id.
+func (crlr ContainerRenewLeaseResponse) LeaseID() string {
+ return crlr.rawResponse.Header.Get("x-ms-lease-id")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (crlr ContainerRenewLeaseResponse) RequestID() string {
+ return crlr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (crlr ContainerRenewLeaseResponse) Version() string {
+ return crlr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ContainerRestoreResponse ...
+type ContainerRestoreResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (crr ContainerRestoreResponse) Response() *http.Response {
+ return crr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (crr ContainerRestoreResponse) StatusCode() int {
+ return crr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (crr ContainerRestoreResponse) Status() string {
+ return crr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (crr ContainerRestoreResponse) ClientRequestID() string {
+ return crr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (crr ContainerRestoreResponse) Date() time.Time {
+ s := crr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (crr ContainerRestoreResponse) ErrorCode() string {
+ return crr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (crr ContainerRestoreResponse) RequestID() string {
+ return crr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (crr ContainerRestoreResponse) Version() string {
+ return crr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ContainerSetAccessPolicyResponse ...
+type ContainerSetAccessPolicyResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response {
+ return csapr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (csapr ContainerSetAccessPolicyResponse) StatusCode() int {
+ return csapr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (csapr ContainerSetAccessPolicyResponse) Status() string {
+ return csapr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (csapr ContainerSetAccessPolicyResponse) ClientRequestID() string {
+ return csapr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (csapr ContainerSetAccessPolicyResponse) Date() time.Time {
+ s := csapr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string {
+ return csapr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (csapr ContainerSetAccessPolicyResponse) ETag() ETag {
+ return ETag(csapr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time {
+ s := csapr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (csapr ContainerSetAccessPolicyResponse) RequestID() string {
+ return csapr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (csapr ContainerSetAccessPolicyResponse) Version() string {
+ return csapr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ContainerSetMetadataResponse ...
+type ContainerSetMetadataResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (csmr ContainerSetMetadataResponse) Response() *http.Response {
+ return csmr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (csmr ContainerSetMetadataResponse) StatusCode() int {
+ return csmr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (csmr ContainerSetMetadataResponse) Status() string {
+ return csmr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (csmr ContainerSetMetadataResponse) ClientRequestID() string {
+ return csmr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (csmr ContainerSetMetadataResponse) Date() time.Time {
+ s := csmr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (csmr ContainerSetMetadataResponse) ErrorCode() string {
+ return csmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (csmr ContainerSetMetadataResponse) ETag() ETag {
+ return ETag(csmr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
+ s := csmr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (csmr ContainerSetMetadataResponse) RequestID() string {
+ return csmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (csmr ContainerSetMetadataResponse) Version() string {
+ return csmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
+// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
+// (the origin domain) to call APIs in another domain
+type CorsRule struct {
+ // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
+ AllowedOrigins string `xml:"AllowedOrigins"`
+ // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+ AllowedMethods string `xml:"AllowedMethods"`
+ // AllowedHeaders - the request headers that the origin domain may specify on the CORS request.
+ AllowedHeaders string `xml:"AllowedHeaders"`
+ // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer
+ ExposedHeaders string `xml:"ExposedHeaders"`
+ // MaxAgeInSeconds - The maximum amount time that a browser should cache the preflight OPTIONS request.
+ MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
+}
+
+// DataLakeStorageError ...
+type DataLakeStorageError struct {
+ // DataLakeStorageErrorDetails - The service error response object.
+ DataLakeStorageErrorDetails *DataLakeStorageErrorError `xml:"error"`
+}
+
+// DataLakeStorageErrorError - The service error response object.
+type DataLakeStorageErrorError struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"DataLakeStorageError_error"`
+ // Code - The service error code.
+ Code *string `xml:"Code"`
+ // Message - The service error message.
+ Message *string `xml:"Message"`
+}
+
+// DelimitedTextConfiguration - delimited text configuration
+type DelimitedTextConfiguration struct {
+ // ColumnSeparator - column separator
+ ColumnSeparator string `xml:"ColumnSeparator"`
+ // FieldQuote - field quote
+ FieldQuote string `xml:"FieldQuote"`
+ // RecordSeparator - record separator
+ RecordSeparator string `xml:"RecordSeparator"`
+ // EscapeChar - escape char
+ EscapeChar string `xml:"EscapeChar"`
+ // HeadersPresent - has headers
+ HeadersPresent bool `xml:"HasHeaders"`
+}
+
+// DirectoryCreateResponse ...
+type DirectoryCreateResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (dcr DirectoryCreateResponse) Response() *http.Response {
+ return dcr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (dcr DirectoryCreateResponse) StatusCode() int {
+ return dcr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (dcr DirectoryCreateResponse) Status() string {
+ return dcr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (dcr DirectoryCreateResponse) ClientRequestID() string {
+ return dcr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (dcr DirectoryCreateResponse) ContentLength() int64 {
+ s := dcr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// Date returns the value for header Date.
+func (dcr DirectoryCreateResponse) Date() time.Time {
+ s := dcr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ETag returns the value for header ETag.
+func (dcr DirectoryCreateResponse) ETag() ETag {
+ return ETag(dcr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dcr DirectoryCreateResponse) LastModified() time.Time {
+ s := dcr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dcr DirectoryCreateResponse) RequestID() string {
+ return dcr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (dcr DirectoryCreateResponse) Version() string {
+ return dcr.rawResponse.Header.Get("x-ms-version")
+}
+
+// DirectoryDeleteResponse ...
+type DirectoryDeleteResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (ddr DirectoryDeleteResponse) Response() *http.Response {
+ return ddr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (ddr DirectoryDeleteResponse) StatusCode() int {
+ return ddr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (ddr DirectoryDeleteResponse) Status() string {
+ return ddr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (ddr DirectoryDeleteResponse) ClientRequestID() string {
+ return ddr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (ddr DirectoryDeleteResponse) Date() time.Time {
+ s := ddr.rawResponse.Header.Get("Date")
if s == "" {
return time.Time{}
}
@@ -3290,39 +4856,49 @@ func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time {
return t
}
+// Marker returns the value for header x-ms-continuation.
+func (ddr DirectoryDeleteResponse) Marker() string {
+ return ddr.rawResponse.Header.Get("x-ms-continuation")
+}
+
// RequestID returns the value for header x-ms-request-id.
-func (crlr ContainerReleaseLeaseResponse) RequestID() string {
- return crlr.rawResponse.Header.Get("x-ms-request-id")
+func (ddr DirectoryDeleteResponse) RequestID() string {
+ return ddr.rawResponse.Header.Get("x-ms-request-id")
}
// Version returns the value for header x-ms-version.
-func (crlr ContainerReleaseLeaseResponse) Version() string {
- return crlr.rawResponse.Header.Get("x-ms-version")
+func (ddr DirectoryDeleteResponse) Version() string {
+ return ddr.rawResponse.Header.Get("x-ms-version")
}
-// ContainerRenewLeaseResponse ...
-type ContainerRenewLeaseResponse struct {
+// DirectoryGetAccessControlResponse ...
+type DirectoryGetAccessControlResponse struct {
rawResponse *http.Response
}
// Response returns the raw HTTP response object.
-func (crlr ContainerRenewLeaseResponse) Response() *http.Response {
- return crlr.rawResponse
+func (dgacr DirectoryGetAccessControlResponse) Response() *http.Response {
+ return dgacr.rawResponse
}
// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (crlr ContainerRenewLeaseResponse) StatusCode() int {
- return crlr.rawResponse.StatusCode
+func (dgacr DirectoryGetAccessControlResponse) StatusCode() int {
+ return dgacr.rawResponse.StatusCode
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (crlr ContainerRenewLeaseResponse) Status() string {
- return crlr.rawResponse.Status
+func (dgacr DirectoryGetAccessControlResponse) Status() string {
+ return dgacr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (dgacr DirectoryGetAccessControlResponse) ClientRequestID() string {
+ return dgacr.rawResponse.Header.Get("x-ms-client-request-id")
}
// Date returns the value for header Date.
-func (crlr ContainerRenewLeaseResponse) Date() time.Time {
- s := crlr.rawResponse.Header.Get("Date")
+func (dgacr DirectoryGetAccessControlResponse) Date() time.Time {
+ s := dgacr.rawResponse.Header.Get("Date")
if s == "" {
return time.Time{}
}
@@ -3333,19 +4909,14 @@ func (crlr ContainerRenewLeaseResponse) Date() time.Time {
return t
}
-// ErrorCode returns the value for header x-ms-error-code.
-func (crlr ContainerRenewLeaseResponse) ErrorCode() string {
- return crlr.rawResponse.Header.Get("x-ms-error-code")
-}
-
// ETag returns the value for header ETag.
-func (crlr ContainerRenewLeaseResponse) ETag() ETag {
- return ETag(crlr.rawResponse.Header.Get("ETag"))
+func (dgacr DirectoryGetAccessControlResponse) ETag() ETag {
+ return ETag(dgacr.rawResponse.Header.Get("ETag"))
}
// LastModified returns the value for header Last-Modified.
-func (crlr ContainerRenewLeaseResponse) LastModified() time.Time {
- s := crlr.rawResponse.Header.Get("Last-Modified")
+func (dgacr DirectoryGetAccessControlResponse) LastModified() time.Time {
+ s := dgacr.rawResponse.Header.Get("Last-Modified")
if s == "" {
return time.Time{}
}
@@ -3356,44 +4927,77 @@ func (crlr ContainerRenewLeaseResponse) LastModified() time.Time {
return t
}
-// LeaseID returns the value for header x-ms-lease-id.
-func (crlr ContainerRenewLeaseResponse) LeaseID() string {
- return crlr.rawResponse.Header.Get("x-ms-lease-id")
-}
-
// RequestID returns the value for header x-ms-request-id.
-func (crlr ContainerRenewLeaseResponse) RequestID() string {
- return crlr.rawResponse.Header.Get("x-ms-request-id")
+func (dgacr DirectoryGetAccessControlResponse) RequestID() string {
+ return dgacr.rawResponse.Header.Get("x-ms-request-id")
}
// Version returns the value for header x-ms-version.
-func (crlr ContainerRenewLeaseResponse) Version() string {
- return crlr.rawResponse.Header.Get("x-ms-version")
+func (dgacr DirectoryGetAccessControlResponse) Version() string {
+ return dgacr.rawResponse.Header.Get("x-ms-version")
}
-// ContainerSetAccessPolicyResponse ...
-type ContainerSetAccessPolicyResponse struct {
+// XMsACL returns the value for header x-ms-acl.
+func (dgacr DirectoryGetAccessControlResponse) XMsACL() string {
+ return dgacr.rawResponse.Header.Get("x-ms-acl")
+}
+
+// XMsGroup returns the value for header x-ms-group.
+func (dgacr DirectoryGetAccessControlResponse) XMsGroup() string {
+ return dgacr.rawResponse.Header.Get("x-ms-group")
+}
+
+// XMsOwner returns the value for header x-ms-owner.
+func (dgacr DirectoryGetAccessControlResponse) XMsOwner() string {
+ return dgacr.rawResponse.Header.Get("x-ms-owner")
+}
+
+// XMsPermissions returns the value for header x-ms-permissions.
+func (dgacr DirectoryGetAccessControlResponse) XMsPermissions() string {
+ return dgacr.rawResponse.Header.Get("x-ms-permissions")
+}
+
+// DirectoryRenameResponse ...
+type DirectoryRenameResponse struct {
rawResponse *http.Response
}
// Response returns the raw HTTP response object.
-func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response {
- return csapr.rawResponse
+func (drr DirectoryRenameResponse) Response() *http.Response {
+ return drr.rawResponse
}
// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (csapr ContainerSetAccessPolicyResponse) StatusCode() int {
- return csapr.rawResponse.StatusCode
+func (drr DirectoryRenameResponse) StatusCode() int {
+ return drr.rawResponse.StatusCode
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (csapr ContainerSetAccessPolicyResponse) Status() string {
- return csapr.rawResponse.Status
+func (drr DirectoryRenameResponse) Status() string {
+ return drr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (drr DirectoryRenameResponse) ClientRequestID() string {
+ return drr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (drr DirectoryRenameResponse) ContentLength() int64 {
+ s := drr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
}
// Date returns the value for header Date.
-func (csapr ContainerSetAccessPolicyResponse) Date() time.Time {
- s := csapr.rawResponse.Header.Get("Date")
+func (drr DirectoryRenameResponse) Date() time.Time {
+ s := drr.rawResponse.Header.Get("Date")
if s == "" {
return time.Time{}
}
@@ -3404,19 +5008,14 @@ func (csapr ContainerSetAccessPolicyResponse) Date() time.Time {
return t
}
-// ErrorCode returns the value for header x-ms-error-code.
-func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string {
- return csapr.rawResponse.Header.Get("x-ms-error-code")
-}
-
// ETag returns the value for header ETag.
-func (csapr ContainerSetAccessPolicyResponse) ETag() ETag {
- return ETag(csapr.rawResponse.Header.Get("ETag"))
+func (drr DirectoryRenameResponse) ETag() ETag {
+ return ETag(drr.rawResponse.Header.Get("ETag"))
}
// LastModified returns the value for header Last-Modified.
-func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time {
- s := csapr.rawResponse.Header.Get("Last-Modified")
+func (drr DirectoryRenameResponse) LastModified() time.Time {
+ s := drr.rawResponse.Header.Get("Last-Modified")
if s == "" {
return time.Time{}
}
@@ -3427,39 +5026,49 @@ func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time {
return t
}
+// Marker returns the value for header x-ms-continuation.
+func (drr DirectoryRenameResponse) Marker() string {
+ return drr.rawResponse.Header.Get("x-ms-continuation")
+}
+
// RequestID returns the value for header x-ms-request-id.
-func (csapr ContainerSetAccessPolicyResponse) RequestID() string {
- return csapr.rawResponse.Header.Get("x-ms-request-id")
+func (drr DirectoryRenameResponse) RequestID() string {
+ return drr.rawResponse.Header.Get("x-ms-request-id")
}
// Version returns the value for header x-ms-version.
-func (csapr ContainerSetAccessPolicyResponse) Version() string {
- return csapr.rawResponse.Header.Get("x-ms-version")
+func (drr DirectoryRenameResponse) Version() string {
+ return drr.rawResponse.Header.Get("x-ms-version")
}
-// ContainerSetMetadataResponse ...
-type ContainerSetMetadataResponse struct {
+// DirectorySetAccessControlResponse ...
+type DirectorySetAccessControlResponse struct {
rawResponse *http.Response
}
// Response returns the raw HTTP response object.
-func (csmr ContainerSetMetadataResponse) Response() *http.Response {
- return csmr.rawResponse
+func (dsacr DirectorySetAccessControlResponse) Response() *http.Response {
+ return dsacr.rawResponse
}
// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (csmr ContainerSetMetadataResponse) StatusCode() int {
- return csmr.rawResponse.StatusCode
+func (dsacr DirectorySetAccessControlResponse) StatusCode() int {
+ return dsacr.rawResponse.StatusCode
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (csmr ContainerSetMetadataResponse) Status() string {
- return csmr.rawResponse.Status
+func (dsacr DirectorySetAccessControlResponse) Status() string {
+ return dsacr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (dsacr DirectorySetAccessControlResponse) ClientRequestID() string {
+ return dsacr.rawResponse.Header.Get("x-ms-client-request-id")
}
// Date returns the value for header Date.
-func (csmr ContainerSetMetadataResponse) Date() time.Time {
- s := csmr.rawResponse.Header.Get("Date")
+func (dsacr DirectorySetAccessControlResponse) Date() time.Time {
+ s := dsacr.rawResponse.Header.Get("Date")
if s == "" {
return time.Time{}
}
@@ -3470,19 +5079,14 @@ func (csmr ContainerSetMetadataResponse) Date() time.Time {
return t
}
-// ErrorCode returns the value for header x-ms-error-code.
-func (csmr ContainerSetMetadataResponse) ErrorCode() string {
- return csmr.rawResponse.Header.Get("x-ms-error-code")
-}
-
// ETag returns the value for header ETag.
-func (csmr ContainerSetMetadataResponse) ETag() ETag {
- return ETag(csmr.rawResponse.Header.Get("ETag"))
+func (dsacr DirectorySetAccessControlResponse) ETag() ETag {
+ return ETag(dsacr.rawResponse.Header.Get("ETag"))
}
// LastModified returns the value for header Last-Modified.
-func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
- s := csmr.rawResponse.Header.Get("Last-Modified")
+func (dsacr DirectorySetAccessControlResponse) LastModified() time.Time {
+ s := dsacr.rawResponse.Header.Get("Last-Modified")
if s == "" {
return time.Time{}
}
@@ -3494,30 +5098,13 @@ func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
}
// RequestID returns the value for header x-ms-request-id.
-func (csmr ContainerSetMetadataResponse) RequestID() string {
- return csmr.rawResponse.Header.Get("x-ms-request-id")
+func (dsacr DirectorySetAccessControlResponse) RequestID() string {
+ return dsacr.rawResponse.Header.Get("x-ms-request-id")
}
// Version returns the value for header x-ms-version.
-func (csmr ContainerSetMetadataResponse) Version() string {
- return csmr.rawResponse.Header.Get("x-ms-version")
-}
-
-// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
-// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
-// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
-// (the origin domain) to call APIs in another domain
-type CorsRule struct {
- // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
- AllowedOrigins string `xml:"AllowedOrigins"`
- // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
- AllowedMethods string `xml:"AllowedMethods"`
- // AllowedHeaders - the request headers that the origin domain may specify on the CORS request.
- AllowedHeaders string `xml:"AllowedHeaders"`
- // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer
- ExposedHeaders string `xml:"ExposedHeaders"`
- // MaxAgeInSeconds - The maximum amount time that a browser should cache the preflight OPTIONS request.
- MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
+func (dsacr DirectorySetAccessControlResponse) Version() string {
+ return dsacr.rawResponse.Header.Get("x-ms-version")
}
// downloadResponse - Wraps the response from the blobClient.Download method.
@@ -3612,6 +5199,24 @@ func (dr downloadResponse) CacheControl() string {
return dr.rawResponse.Header.Get("Cache-Control")
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (dr downloadResponse) ClientRequestID() string {
+ return dr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentCrc64 returns the value for header x-ms-content-crc64.
+func (dr downloadResponse) ContentCrc64() []byte {
+ s := dr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// ContentDisposition returns the value for header Content-Disposition.
func (dr downloadResponse) ContentDisposition() string {
return dr.rawResponse.Header.Get("Content-Disposition")
@@ -3714,6 +5319,16 @@ func (dr downloadResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (dr downloadResponse) EncryptionKeySha256() string {
+ return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (dr downloadResponse) EncryptionScope() string {
+ return dr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (dr downloadResponse) ErrorCode() string {
return dr.rawResponse.Header.Get("x-ms-error-code")
@@ -3724,6 +5339,11 @@ func (dr downloadResponse) ETag() ETag {
return ETag(dr.rawResponse.Header.Get("ETag"))
}
+// IsSealed returns the value for header x-ms-blob-sealed.
+func (dr downloadResponse) IsSealed() string {
+ return dr.rawResponse.Header.Get("x-ms-blob-sealed")
+}
+
// IsServerEncrypted returns the value for header x-ms-server-encrypted.
func (dr downloadResponse) IsServerEncrypted() string {
return dr.rawResponse.Header.Get("x-ms-server-encrypted")
@@ -3757,16 +5377,112 @@ func (dr downloadResponse) LeaseStatus() LeaseStatusType {
return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status"))
}
+// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id.
+func (dr downloadResponse) ObjectReplicationPolicyID() string {
+ return dr.rawResponse.Header.Get("x-ms-or-policy-id")
+}
+
+// ObjectReplicationRules returns the value for header x-ms-or.
+func (dr downloadResponse) ObjectReplicationRules() string {
+ return dr.rawResponse.Header.Get("x-ms-or")
+}
+
// RequestID returns the value for header x-ms-request-id.
func (dr downloadResponse) RequestID() string {
return dr.rawResponse.Header.Get("x-ms-request-id")
}
+// TagCount returns the value for header x-ms-tag-count.
+func (dr downloadResponse) TagCount() int64 {
+ s := dr.rawResponse.Header.Get("x-ms-tag-count")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
// Version returns the value for header x-ms-version.
func (dr downloadResponse) Version() string {
return dr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (dr downloadResponse) VersionID() string {
+ return dr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// FilterBlobItem - Blob info from a Filter Blobs API call
+type FilterBlobItem struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"Blob"`
+ Name string `xml:"Name"`
+ ContainerName string `xml:"ContainerName"`
+ TagValue string `xml:"TagValue"`
+}
+
+// FilterBlobSegment - The result of a Filter Blobs API call
+type FilterBlobSegment struct {
+ rawResponse *http.Response
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"EnumerationResults"`
+ ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
+ Where string `xml:"Where"`
+ Blobs []FilterBlobItem `xml:"Blobs>Blob"`
+ NextMarker *string `xml:"NextMarker"`
+}
+
+// Response returns the raw HTTP response object.
+func (fbs FilterBlobSegment) Response() *http.Response {
+ return fbs.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (fbs FilterBlobSegment) StatusCode() int {
+ return fbs.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (fbs FilterBlobSegment) Status() string {
+ return fbs.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (fbs FilterBlobSegment) ClientRequestID() string {
+ return fbs.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (fbs FilterBlobSegment) Date() time.Time {
+ s := fbs.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (fbs FilterBlobSegment) ErrorCode() string {
+ return fbs.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (fbs FilterBlobSegment) RequestID() string {
+ return fbs.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (fbs FilterBlobSegment) Version() string {
+ return fbs.rawResponse.Header.Get("x-ms-version")
+}
+
// GeoReplication - Geo-Replication information for the Secondary Storage Service
type GeoReplication struct {
// Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone'
@@ -3787,6 +5503,14 @@ func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
return d.DecodeElement(gr2, &start)
}
+// JSONTextConfiguration - json text configuration
+type JSONTextConfiguration struct {
+ // XMLName is used for marshalling and is subject to removal in a future release.
+ XMLName xml.Name `xml:"JsonTextConfiguration"`
+ // RecordSeparator - record separator
+ RecordSeparator string `xml:"RecordSeparator"`
+}
+
// KeyInfo - Key information
type KeyInfo struct {
// Start - The date-time the key is active in ISO 8601 UTC time
@@ -3795,14 +5519,6 @@ type KeyInfo struct {
Expiry string `xml:"Expiry"`
}
-//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion
-func NewKeyInfo(Start, Expiry time.Time) KeyInfo {
- return KeyInfo{
- Start: Start.UTC().Format(SASTimeFormat),
- Expiry: Expiry.UTC().Format(SASTimeFormat),
- }
-}
-
// ListBlobsFlatSegmentResponse - An enumeration of blobs
type ListBlobsFlatSegmentResponse struct {
rawResponse *http.Response
@@ -3813,7 +5529,6 @@ type ListBlobsFlatSegmentResponse struct {
Prefix *string `xml:"Prefix"`
Marker *string `xml:"Marker"`
MaxResults *int32 `xml:"MaxResults"`
- Delimiter *string `xml:"Delimiter"`
Segment BlobFlatListSegment `xml:"Blobs"`
NextMarker Marker `xml:"NextMarker"`
}
@@ -3833,6 +5548,11 @@ func (lbfsr ListBlobsFlatSegmentResponse) Status() string {
return lbfsr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string {
+ return lbfsr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentType returns the value for header Content-Type.
func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string {
return lbfsr.rawResponse.Header.Get("Content-Type")
@@ -3896,6 +5616,11 @@ func (lbhsr ListBlobsHierarchySegmentResponse) Status() string {
return lbhsr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string {
+ return lbhsr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentType returns the value for header Content-Type.
func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string {
return lbhsr.rawResponse.Header.Get("Content-Type")
@@ -3957,6 +5682,11 @@ func (lcsr ListContainersSegmentResponse) Status() string {
return lcsr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (lcsr ListContainersSegmentResponse) ClientRequestID() string {
+ return lcsr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (lcsr ListContainersSegmentResponse) ErrorCode() string {
return lcsr.rawResponse.Header.Get("x-ms-error-code")
@@ -4029,6 +5759,11 @@ func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string {
+ return pbcpr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte {
s := pbcpr.rawResponse.Header.Get("Content-MD5")
@@ -4088,6 +5823,19 @@ func (pbcpr PageBlobClearPagesResponse) Version() string {
return pbcpr.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte {
+ s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// PageBlobCopyIncrementalResponse ...
type PageBlobCopyIncrementalResponse struct {
rawResponse *http.Response
@@ -4108,6 +5856,11 @@ func (pbcir PageBlobCopyIncrementalResponse) Status() string {
return pbcir.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string {
+ return pbcir.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// CopyID returns the value for header x-ms-copy-id.
func (pbcir PageBlobCopyIncrementalResponse) CopyID() string {
return pbcir.rawResponse.Header.Get("x-ms-copy-id")
@@ -4184,6 +5937,11 @@ func (pbcr PageBlobCreateResponse) Status() string {
return pbcr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbcr PageBlobCreateResponse) ClientRequestID() string {
+ return pbcr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (pbcr PageBlobCreateResponse) ContentMD5() []byte {
s := pbcr.rawResponse.Header.Get("Content-MD5")
@@ -4210,6 +5968,16 @@ func (pbcr PageBlobCreateResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string {
+ return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (pbcr PageBlobCreateResponse) EncryptionScope() string {
+ return pbcr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (pbcr PageBlobCreateResponse) ErrorCode() string {
return pbcr.rawResponse.Header.Get("x-ms-error-code")
@@ -4248,6 +6016,11 @@ func (pbcr PageBlobCreateResponse) Version() string {
return pbcr.rawResponse.Header.Get("x-ms-version")
}
+// VersionID returns the value for header x-ms-version-id.
+func (pbcr PageBlobCreateResponse) VersionID() string {
+ return pbcr.rawResponse.Header.Get("x-ms-version-id")
+}
+
// PageBlobResizeResponse ...
type PageBlobResizeResponse struct {
rawResponse *http.Response
@@ -4281,6 +6054,11 @@ func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbrr PageBlobResizeResponse) ClientRequestID() string {
+ return pbrr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (pbrr PageBlobResizeResponse) Date() time.Time {
s := pbrr.rawResponse.Header.Get("Date")
@@ -4360,6 +6138,11 @@ func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string {
+ return pbusnr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time {
s := pbusnr.rawResponse.Header.Get("Date")
@@ -4465,6 +6248,16 @@ func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string {
+ return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionScope() string {
+ return pbupfur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string {
return pbupfur.rawResponse.Header.Get("x-ms-error-code")
@@ -4503,6 +6296,19 @@ func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string {
return pbupfur.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte {
+ s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// PageBlobUploadPagesResponse ...
type PageBlobUploadPagesResponse struct {
rawResponse *http.Response
@@ -4536,6 +6342,11 @@ func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string {
+ return pbupr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ContentMD5 returns the value for header Content-MD5.
func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte {
s := pbupr.rawResponse.Header.Get("Content-MD5")
@@ -4562,6 +6373,16 @@ func (pbupr PageBlobUploadPagesResponse) Date() time.Time {
return t
}
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string {
+ return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (pbupr PageBlobUploadPagesResponse) EncryptionScope() string {
+ return pbupr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (pbupr PageBlobUploadPagesResponse) ErrorCode() string {
return pbupr.rawResponse.Header.Get("x-ms-error-code")
@@ -4600,6 +6421,19 @@ func (pbupr PageBlobUploadPagesResponse) Version() string {
return pbupr.rawResponse.Header.Get("x-ms-version")
}
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte {
+ s := pbupr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
// PageList - the list of pages
type PageList struct {
rawResponse *http.Response
@@ -4635,6 +6469,11 @@ func (pl PageList) BlobContentLength() int64 {
return i
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (pl PageList) ClientRequestID() string {
+ return pl.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (pl PageList) Date() time.Time {
s := pl.rawResponse.Header.Get("Date")
@@ -4687,6 +6526,304 @@ type PageRange struct {
End int64 `xml:"End"`
}
+// QueryFormat ...
+type QueryFormat struct {
+ // Type - Possible values include: 'QueryFormatDelimited', 'QueryFormatJSON', 'QueryFormatNone'
+ Type QueryFormatType `xml:"Type"`
+ DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"`
+ JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"`
+}
+
+// QueryRequest - the quick query body
+type QueryRequest struct {
+ // QueryType - the query type
+ QueryType string `xml:"QueryType"`
+ // Expression - a query statement
+ Expression string `xml:"Expression"`
+ InputSerialization *QuerySerialization `xml:"InputSerialization"`
+ OutputSerialization *QuerySerialization `xml:"OutputSerialization"`
+}
+
+// QueryResponse - Wraps the response from the blobClient.Query method.
+type QueryResponse struct {
+ rawResponse *http.Response
+}
+
+// NewMetadata returns user-defined key/value pairs.
+func (qr QueryResponse) NewMetadata() Metadata {
+ md := Metadata{}
+ for k, v := range qr.rawResponse.Header {
+ if len(k) > mdPrefixLen {
+ if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
+ md[strings.ToLower(k[mdPrefixLen:])] = v[0]
+ }
+ }
+ }
+ return md
+}
+
+// Response returns the raw HTTP response object.
+func (qr QueryResponse) Response() *http.Response {
+ return qr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (qr QueryResponse) StatusCode() int {
+ return qr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (qr QueryResponse) Status() string {
+ return qr.rawResponse.Status
+}
+
+// Body returns the raw HTTP response object's Body.
+func (qr QueryResponse) Body() io.ReadCloser {
+ return qr.rawResponse.Body
+}
+
+// AcceptRanges returns the value for header Accept-Ranges.
+func (qr QueryResponse) AcceptRanges() string {
+ return qr.rawResponse.Header.Get("Accept-Ranges")
+}
+
+// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
+func (qr QueryResponse) BlobCommittedBlockCount() int32 {
+ s := qr.rawResponse.Header.Get("x-ms-blob-committed-block-count")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ i = 0
+ }
+ return int32(i)
+}
+
+// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
+func (qr QueryResponse) BlobContentMD5() []byte {
+ s := qr.rawResponse.Header.Get("x-ms-blob-content-md5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
+func (qr QueryResponse) BlobSequenceNumber() int64 {
+ s := qr.rawResponse.Header.Get("x-ms-blob-sequence-number")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// BlobType returns the value for header x-ms-blob-type.
+func (qr QueryResponse) BlobType() BlobType {
+ return BlobType(qr.rawResponse.Header.Get("x-ms-blob-type"))
+}
+
+// CacheControl returns the value for header Cache-Control.
+func (qr QueryResponse) CacheControl() string {
+ return qr.rawResponse.Header.Get("Cache-Control")
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (qr QueryResponse) ClientRequestID() string {
+ return qr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentCrc64 returns the value for header x-ms-content-crc64.
+func (qr QueryResponse) ContentCrc64() []byte {
+ s := qr.rawResponse.Header.Get("x-ms-content-crc64")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// ContentDisposition returns the value for header Content-Disposition.
+func (qr QueryResponse) ContentDisposition() string {
+ return qr.rawResponse.Header.Get("Content-Disposition")
+}
+
+// ContentEncoding returns the value for header Content-Encoding.
+func (qr QueryResponse) ContentEncoding() string {
+ return qr.rawResponse.Header.Get("Content-Encoding")
+}
+
+// ContentLanguage returns the value for header Content-Language.
+func (qr QueryResponse) ContentLanguage() string {
+ return qr.rawResponse.Header.Get("Content-Language")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (qr QueryResponse) ContentLength() int64 {
+ s := qr.rawResponse.Header.Get("Content-Length")
+ if s == "" {
+ return -1
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (qr QueryResponse) ContentMD5() []byte {
+ s := qr.rawResponse.Header.Get("Content-MD5")
+ if s == "" {
+ return nil
+ }
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b = nil
+ }
+ return b
+}
+
+// ContentRange returns the value for header Content-Range.
+func (qr QueryResponse) ContentRange() string {
+ return qr.rawResponse.Header.Get("Content-Range")
+}
+
+// ContentType returns the value for header Content-Type.
+func (qr QueryResponse) ContentType() string {
+ return qr.rawResponse.Header.Get("Content-Type")
+}
+
+// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
+func (qr QueryResponse) CopyCompletionTime() time.Time {
+ s := qr.rawResponse.Header.Get("x-ms-copy-completion-time")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (qr QueryResponse) CopyID() string {
+ return qr.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyProgress returns the value for header x-ms-copy-progress.
+func (qr QueryResponse) CopyProgress() string {
+ return qr.rawResponse.Header.Get("x-ms-copy-progress")
+}
+
+// CopySource returns the value for header x-ms-copy-source.
+func (qr QueryResponse) CopySource() string {
+ return qr.rawResponse.Header.Get("x-ms-copy-source")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (qr QueryResponse) CopyStatus() CopyStatusType {
+ return CopyStatusType(qr.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// CopyStatusDescription returns the value for header x-ms-copy-status-description.
+func (qr QueryResponse) CopyStatusDescription() string {
+ return qr.rawResponse.Header.Get("x-ms-copy-status-description")
+}
+
+// Date returns the value for header Date.
+func (qr QueryResponse) Date() time.Time {
+ s := qr.rawResponse.Header.Get("Date")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (qr QueryResponse) EncryptionKeySha256() string {
+ return qr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (qr QueryResponse) EncryptionScope() string {
+ return qr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (qr QueryResponse) ErrorCode() string {
+ return qr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (qr QueryResponse) ETag() ETag {
+ return ETag(qr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (qr QueryResponse) IsServerEncrypted() string {
+ return qr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (qr QueryResponse) LastModified() time.Time {
+ s := qr.rawResponse.Header.Get("Last-Modified")
+ if s == "" {
+ return time.Time{}
+ }
+ t, err := time.Parse(time.RFC1123, s)
+ if err != nil {
+ t = time.Time{}
+ }
+ return t
+}
+
+// LeaseDuration returns the value for header x-ms-lease-duration.
+func (qr QueryResponse) LeaseDuration() LeaseDurationType {
+ return LeaseDurationType(qr.rawResponse.Header.Get("x-ms-lease-duration"))
+}
+
+// LeaseState returns the value for header x-ms-lease-state.
+func (qr QueryResponse) LeaseState() LeaseStateType {
+ return LeaseStateType(qr.rawResponse.Header.Get("x-ms-lease-state"))
+}
+
+// LeaseStatus returns the value for header x-ms-lease-status.
+func (qr QueryResponse) LeaseStatus() LeaseStatusType {
+ return LeaseStatusType(qr.rawResponse.Header.Get("x-ms-lease-status"))
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (qr QueryResponse) RequestID() string {
+ return qr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (qr QueryResponse) Version() string {
+ return qr.rawResponse.Header.Get("x-ms-version")
+}
+
+// QuerySerialization ...
+type QuerySerialization struct {
+ Format QueryFormat `xml:"Format"`
+}
+
// RetentionPolicy - the retention policy which determines how long the associated data should persist
type RetentionPolicy struct {
// Enabled - Indicates whether a retention policy is enabled for the storage service
@@ -4720,6 +6857,11 @@ func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType {
return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind"))
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string {
+ return sgair.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (sgair ServiceGetAccountInfoResponse) Date() time.Time {
s := sgair.rawResponse.Header.Get("Date")
@@ -4773,6 +6915,11 @@ func (sspr ServiceSetPropertiesResponse) Status() string {
return sspr.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (sspr ServiceSetPropertiesResponse) ClientRequestID() string {
+ return sspr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (sspr ServiceSetPropertiesResponse) ErrorCode() string {
return sspr.rawResponse.Header.Get("x-ms-error-code")
@@ -4821,6 +6968,11 @@ func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType {
return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access"))
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (si SignedIdentifiers) ClientRequestID() string {
+ return si.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (si SignedIdentifiers) Date() time.Time {
s := si.rawResponse.Header.Get("Date")
@@ -4875,6 +7027,8 @@ type StaticWebsite struct {
IndexDocument *string `xml:"IndexDocument"`
// ErrorDocument404Path - The absolute path of the custom 404 page
ErrorDocument404Path *string `xml:"ErrorDocument404Path"`
+ // DefaultIndexDocumentPath - Absolute path of the default index page
+ DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"`
}
// StorageServiceProperties - Storage Service Properties.
@@ -4906,6 +7060,11 @@ func (ssp StorageServiceProperties) Status() string {
return ssp.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (ssp StorageServiceProperties) ClientRequestID() string {
+ return ssp.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// ErrorCode returns the value for header x-ms-error-code.
func (ssp StorageServiceProperties) ErrorCode() string {
return ssp.rawResponse.Header.Get("x-ms-error-code")
@@ -4942,6 +7101,11 @@ func (sss StorageServiceStats) Status() string {
return sss.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (sss StorageServiceStats) ClientRequestID() string {
+ return sss.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (sss StorageServiceStats) Date() time.Time {
s := sss.rawResponse.Header.Get("Date")
@@ -4970,6 +7134,51 @@ func (sss StorageServiceStats) Version() string {
return sss.rawResponse.Header.Get("x-ms-version")
}
+// SubmitBatchResponse - Wraps the response from the serviceClient.SubmitBatch method.
+type SubmitBatchResponse struct {
+ rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (sbr SubmitBatchResponse) Response() *http.Response {
+ return sbr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (sbr SubmitBatchResponse) StatusCode() int {
+ return sbr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (sbr SubmitBatchResponse) Status() string {
+ return sbr.rawResponse.Status
+}
+
+// Body returns the raw HTTP response object's Body.
+func (sbr SubmitBatchResponse) Body() io.ReadCloser {
+ return sbr.rawResponse.Body
+}
+
+// ContentType returns the value for header Content-Type.
+func (sbr SubmitBatchResponse) ContentType() string {
+ return sbr.rawResponse.Header.Get("Content-Type")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (sbr SubmitBatchResponse) ErrorCode() string {
+ return sbr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (sbr SubmitBatchResponse) RequestID() string {
+ return sbr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (sbr SubmitBatchResponse) Version() string {
+ return sbr.rawResponse.Header.Get("x-ms-version")
+}
+
// UserDelegationKey - A user delegation key
type UserDelegationKey struct {
rawResponse *http.Response
@@ -4989,13 +7198,6 @@ type UserDelegationKey struct {
Value string `xml:"Value"`
}
-func (udk UserDelegationKey) ComputeHMACSHA256(message string) (base64String string) {
- bytes, _ := base64.StdEncoding.DecodeString(udk.Value)
- h := hmac.New(sha256.New, bytes)
- h.Write([]byte(message))
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
// MarshalXML implements the xml.Marshaler interface for UserDelegationKey.
func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
udk2 := (*userDelegationKey)(unsafe.Pointer(&udk))
@@ -5023,6 +7225,11 @@ func (udk UserDelegationKey) Status() string {
return udk.rawResponse.Status
}
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (udk UserDelegationKey) ClientRequestID() string {
+ return udk.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
// Date returns the value for header Date.
func (udk UserDelegationKey) Date() time.Time {
s := udk.rawResponse.Header.Get("Date")
@@ -5142,57 +7349,67 @@ type userDelegationKey struct {
// internal type used for marshalling
type accessPolicy struct {
- Start timeRFC3339 `xml:"Start"`
- Expiry timeRFC3339 `xml:"Expiry"`
- Permission string `xml:"Permission"`
+ Start *timeRFC3339 `xml:"Start"`
+ Expiry *timeRFC3339 `xml:"Expiry"`
+ Permission *string `xml:"Permission"`
}
// internal type used for marshalling
type blobProperties struct {
// XMLName is used for marshalling and is subject to removal in a future release.
- XMLName xml.Name `xml:"Properties"`
- CreationTime *timeRFC1123 `xml:"Creation-Time"`
- LastModified timeRFC1123 `xml:"Last-Modified"`
- Etag ETag `xml:"Etag"`
- ContentLength *int64 `xml:"Content-Length"`
- ContentType *string `xml:"Content-Type"`
- ContentEncoding *string `xml:"Content-Encoding"`
- ContentLanguage *string `xml:"Content-Language"`
- ContentMD5 base64Encoded `xml:"Content-MD5"`
- ContentDisposition *string `xml:"Content-Disposition"`
- CacheControl *string `xml:"Cache-Control"`
- BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"`
- BlobType BlobType `xml:"BlobType"`
- LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
- LeaseState LeaseStateType `xml:"LeaseState"`
- LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
- CopyID *string `xml:"CopyId"`
- CopyStatus CopyStatusType `xml:"CopyStatus"`
- CopySource *string `xml:"CopySource"`
- CopyProgress *string `xml:"CopyProgress"`
- CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
- CopyStatusDescription *string `xml:"CopyStatusDescription"`
- ServerEncrypted *bool `xml:"ServerEncrypted"`
- IncrementalCopy *bool `xml:"IncrementalCopy"`
- DestinationSnapshot *string `xml:"DestinationSnapshot"`
- DeletedTime *timeRFC1123 `xml:"DeletedTime"`
- RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
- AccessTier AccessTierType `xml:"AccessTier"`
- AccessTierInferred *bool `xml:"AccessTierInferred"`
- ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
- AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
+ XMLName xml.Name `xml:"Properties"`
+ CreationTime *timeRFC1123 `xml:"Creation-Time"`
+ LastModified timeRFC1123 `xml:"Last-Modified"`
+ Etag ETag `xml:"Etag"`
+ ContentLength *int64 `xml:"Content-Length"`
+ ContentType *string `xml:"Content-Type"`
+ ContentEncoding *string `xml:"Content-Encoding"`
+ ContentLanguage *string `xml:"Content-Language"`
+ ContentMD5 base64Encoded `xml:"Content-MD5"`
+ ContentDisposition *string `xml:"Content-Disposition"`
+ CacheControl *string `xml:"Cache-Control"`
+ BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"`
+ BlobType BlobType `xml:"BlobType"`
+ LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
+ LeaseState LeaseStateType `xml:"LeaseState"`
+ LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
+ CopyID *string `xml:"CopyId"`
+ CopyStatus CopyStatusType `xml:"CopyStatus"`
+ CopySource *string `xml:"CopySource"`
+ CopyProgress *string `xml:"CopyProgress"`
+ CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
+ CopyStatusDescription *string `xml:"CopyStatusDescription"`
+ ServerEncrypted *bool `xml:"ServerEncrypted"`
+ IncrementalCopy *bool `xml:"IncrementalCopy"`
+ DestinationSnapshot *string `xml:"DestinationSnapshot"`
+ DeletedTime *timeRFC1123 `xml:"DeletedTime"`
+ RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+ AccessTier AccessTierType `xml:"AccessTier"`
+ AccessTierInferred *bool `xml:"AccessTierInferred"`
+ ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
+ CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"`
+ EncryptionScope *string `xml:"EncryptionScope"`
+ AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
+ TagCount *int32 `xml:"TagCount"`
+ ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
+ IsSealed *bool `xml:"IsSealed"`
+ RehydratePriority RehydratePriorityType `xml:"RehydratePriority"`
}
// internal type used for marshalling
type containerProperties struct {
- LastModified timeRFC1123 `xml:"Last-Modified"`
- Etag ETag `xml:"Etag"`
- LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
- LeaseState LeaseStateType `xml:"LeaseState"`
- LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
- PublicAccess PublicAccessType `xml:"PublicAccess"`
- HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
- HasLegalHold *bool `xml:"HasLegalHold"`
+ LastModified timeRFC1123 `xml:"Last-Modified"`
+ Etag ETag `xml:"Etag"`
+ LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
+ LeaseState LeaseStateType `xml:"LeaseState"`
+ LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
+ PublicAccess PublicAccessType `xml:"PublicAccess"`
+ HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
+ HasLegalHold *bool `xml:"HasLegalHold"`
+ DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
+ PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
+ DeletedTime *timeRFC1123 `xml:"DeletedTime"`
+ RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
}
// internal type used for marshalling
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go
index 42e27da4c4..b55ae12bbd 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go
@@ -33,6 +33,14 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
+// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
+// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
+// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
+// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
+// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
+// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
+// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
@@ -42,14 +50,14 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
-func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
+func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -61,7 +69,7 @@ func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64
}
// clearPagesPreparer prepares the ClearPages request.
-func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -79,6 +87,18 @@ func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *in
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
@@ -202,35 +222,45 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
// expressed in seconds. For more information, see Setting
-// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified,
-// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
-// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
-// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
-// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
-// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
-// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
-// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
-// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
-// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
-// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
-// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
-// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
-// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
-// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
-// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
-// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
-// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
-// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
-// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
+// Timeouts for Blob Service Operations. tier is optional. Indicates the tier to be set on the page blob.
+// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and
+// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this
+// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's
+// content language. If specified, this property is stored with the blob and returned with a read request.
+// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for
+// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache
+// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional.
+// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value
+// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from
+// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules
+// for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if
+// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition
+// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to
+// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account
+// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
+// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
+// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version
+// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the
+// request. If not specified, encryption is performed with the default account encryption scope. For more information,
+// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
+// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
+// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
+// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations.
+func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (*PageBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
+ req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString)
if err != nil {
return nil, err
}
@@ -242,7 +272,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, bl
}
// createPreparer prepares the Create request.
-func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -253,6 +283,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ if tier != PremiumPageBlobAccessTierNone {
+ req.Header.Set("x-ms-access-tier", string(tier))
+ }
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
@@ -279,6 +312,18 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -291,6 +336,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
@@ -299,6 +347,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
+ if blobTagsString != nil {
+ req.Header.Set("x-ms-tags", *blobTagsString)
+ }
req.Header.Set("x-ms-blob-type", "PageBlob")
return req, nil
}
@@ -327,17 +378,18 @@ func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.R
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
-// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-// analytics logging is enabled.
-func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
+// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL
+// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated,
+// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
+// enabled.
+func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -349,7 +401,7 @@ func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string
}
// getPageRangesPreparer prepares the GetPageRanges request.
-func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -381,6 +433,9 @@ func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *in
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -425,22 +480,25 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
-// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
-// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
-// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
-// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
-// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
-// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
+// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header
+// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the
+// target blob. The response will only contain pages that were changed between the target blob and its previous
+// snapshot. rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the
+// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this
+// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
+// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
+// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to
+// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -452,7 +510,7 @@ func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *st
}
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
-func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -469,6 +527,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout
}
params.Set("comp", "pagelist")
req.URL.RawQuery = params.Encode()
+ if prevSnapshotURL != nil {
+ req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL)
+ }
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
@@ -487,6 +548,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -526,20 +590,28 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response)
// see Setting
// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
-// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
-// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
-// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
-// logs when storage analytics logging is enabled.
-func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
+// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
+// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
+// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
+// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
+// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
+// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
+// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
+// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
+// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
+// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled.
+func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -551,7 +623,7 @@ func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64
}
// resizePreparer prepares the Resize request.
-func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -565,6 +637,18 @@ func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *in
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@@ -682,11 +766,19 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
//
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
// error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
-// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
-// information, see Setting
// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
+// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
+// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
+// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
+// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
+// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
+// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
+// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
@@ -694,9 +786,10 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
-// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
-// recorded in the analytics logs when storage analytics logging is enabled.
-func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
+// analytics logs when storage analytics logging is enabled.
+func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -705,7 +798,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
+ req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@@ -717,7 +810,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
}
// uploadPagesPreparer prepares the UploadPages request.
-func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -732,12 +825,27 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
+ if transactionalContentCrc64 != nil {
+ req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
+ }
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
@@ -759,6 +867,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@@ -785,32 +896,41 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
+// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
// seconds. For more information, see Setting
-// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
-// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
-// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
-// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
-// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
-// specify this header value to operate only on a blob if it has been modified since the specified date/time.
-// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
-// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
-// specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this
-// header value to operate only on a blob if it has been modified since the specified date/time.
-// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
-// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
-// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
-// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-// analytics logging is enabled.
-func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
+// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt
+// the data provided in the request. If not specified, encryption is performed with the root account encryption key.
+// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
+// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
+// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
+// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later.
+// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified,
+// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for
+// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and
+// matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has
+// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
+// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
+// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
+// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
+// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
+// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to
+// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to
+// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag
+// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on
+// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit
+// that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
- req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
+ req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -822,7 +942,7 @@ func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL s
}
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
-func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -838,8 +958,23 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source
if sourceContentMD5 != nil {
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
}
+ if sourceContentcrc64 != nil {
+ req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
+ }
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
req.Header.Set("x-ms-range", rangeParameter)
+ if encryptionKey != nil {
+ req.Header.Set("x-ms-encryption-key", *encryptionKey)
+ }
+ if encryptionKeySha256 != nil {
+ req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
+ }
+ if encryptionAlgorithm != EncryptionAlgorithmNone {
+ req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
+ }
+ if encryptionScope != nil {
+ req.Header.Set("x-ms-encryption-scope", *encryptionScope)
+ }
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -864,6 +999,9 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
index 6c896b729a..daff580ae2 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
@@ -25,6 +25,98 @@ func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
return serviceClient{newManagementClient(url, p)}
}
+// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given
+// search expression. Filter blobs searches across all containers within a storage account but can be scoped within
+// the expression to a single container.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters
+// the results to return only blobs whose tags match the specified expression. marker is a string value
+// that identifies the portion of the list of containers to be returned with the next listing operation. The operation
+// returns the NextMarker value within the response body if the listing operation did not return all containers
+// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter
+// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is
+// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a
+// value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a
+// partition boundary, then the service will return a continuation token for retrieving the remainder of the results.
+// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the
+// default of 5000.
+func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) {
+ if err := validate([]validation{
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
+ {targetValue: maxresults,
+ constraints: []constraint{{target: "maxresults", name: null, rule: false,
+ chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*FilterBlobSegment), err
+}
+
+// filterBlobsPreparer prepares the FilterBlobs request.
+func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if where != nil && len(*where) > 0 {
+ params.Set("where", *where)
+ }
+ if marker != nil && len(*marker) > 0 {
+ params.Set("marker", *marker)
+ }
+ if maxresults != nil {
+ params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+ }
+ params.Set("comp", "blobs")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ return req, nil
+}
+
+// filterBlobsResponder handles the response to the FilterBlobs request.
+func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &FilterBlobSegment{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
// GetAccountInfo returns the sku name and account kind
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
req, err := client.getAccountInfoPreparer()
@@ -203,7 +295,7 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe
return result, nil
}
-// GetUserDelegationKey retrieves a user delgation key for the Blob service. This is only a valid operation when using
+// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using
// bearer token authentication.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
+func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
if err := validate([]validation{
{targetValue: maxresults,
constraints: []constraint{{target: "maxresults", name: null, rule: false,
@@ -322,7 +414,7 @@ func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *s
}
// listContainersSegmentPreparer prepares the ListContainersSegment request.
-func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
+func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -337,8 +429,8 @@ func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker
if maxresults != nil {
params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
}
- if include != ListContainersIncludeNone {
- params.Set("include", string(include))
+ if include != nil && len(include) > 0 {
+ params.Set("include", joinConst(include, ","))
}
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
@@ -465,3 +557,62 @@ func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipe
resp.Response().Body.Close()
return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
}
+
+// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request.
+//
+// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
+// error. contentLength is the length of the request. multipartContentType is required. The value of this header must be
+// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) {
+ if err := validate([]validation{
+ {targetValue: body,
+ constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
+ {targetValue: timeout,
+ constraints: []constraint{{target: "timeout", name: null, rule: false,
+ chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+ return nil, err
+ }
+ req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*SubmitBatchResponse), err
+}
+
+// submitBatchPreparer prepares the SubmitBatch request.
+func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("POST", client.url, body)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ params.Set("comp", "batch")
+ req.URL.RawQuery = params.Encode()
+ req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ req.Header.Set("Content-Type", multipartContentType)
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ return req, nil
+}
+
+// submitBatchResponder handles the response to the SubmitBatch request.
+func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ return &SubmitBatchResponse{rawResponse: resp.Response()}, err
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go
index 4b49c18662..200b2f5684 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go
@@ -5,7 +5,7 @@ package azblob
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
- return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
+ return "Azure-SDK-For-Go/0.0.0 azblob/2019-12-12"
}
// Version returns the semantic version (see http://semver.org) of the client.
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go
index 8c7f594532..5c086c5cf1 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go
@@ -45,7 +45,7 @@ func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
///////////////////////////////////////////////////////////////////////////////
-// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry.
+// DownloadResponse wraps AutoRest generated DownloadResponse and helps to provide info for retry.
type DownloadResponse struct {
r *downloadResponse
ctx context.Context
@@ -63,11 +63,9 @@ func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
}
return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
- resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
- BlobAccessConditions{
- ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
- },
- false)
+ resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{
+ ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
+ }, false, o.ClientProvidedKeyOptions)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go
index 79b4dabcef..c7b29c0b35 100644
--- a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go
@@ -42,19 +42,15 @@ func writeConf() {
autoDetect = ieCfg.fAutoDetect
}
- // Try WinHTTP default proxy.
- if defaultCfg, err := getDefaultProxyConfiguration(); err == nil {
- defer globalFreeWrapper(defaultCfg.lpszProxy)
- defer globalFreeWrapper(defaultCfg.lpszProxyBypass)
-
- newProxy := StringFromUTF16Ptr(defaultCfg.lpszProxy)
- if proxy == "" {
- proxy = newProxy
- }
-
- newProxyByPass := StringFromUTF16Ptr(defaultCfg.lpszProxyBypass)
- if proxyByPass == "" {
- proxyByPass = newProxyByPass
+ if proxy == "" && !autoDetect{
+ // Try WinHTTP default proxy.
+ if defaultCfg, err := getDefaultProxyConfiguration(); err == nil {
+ defer globalFreeWrapper(defaultCfg.lpszProxy)
+ defer globalFreeWrapper(defaultCfg.lpszProxyBypass)
+
+ // Always set both of these (they are a pair, it doesn't make sense to set one here and keep the value of the other from above)
+ proxy = StringFromUTF16Ptr(defaultCfg.lpszProxy)
+ proxyByPass = StringFromUTF16Ptr(defaultCfg.lpszProxyBypass)
}
}
@@ -168,7 +164,27 @@ func parseRegedit(regedit regeditValues) ProxyConf {
}
func readRegedit() (values regeditValues, err error) {
- k, err := registry.OpenKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
+ var proxySettingsPerUser uint64 = 1 // 1 is the default value to consider current user
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
+ if err == nil {
+		// We use the temporary variable tempPrxUsrSettings because the Go method GetIntegerValue
+		// sets the value to zero even if it fails.
+ tempPrxUsrSettings, _, err := k.GetIntegerValue("ProxySettingsPerUser")
+ if err == nil {
+ //consider the value of tempPrxUsrSettings if it is a success
+ proxySettingsPerUser = tempPrxUsrSettings
+ }
+ k.Close()
+ }
+
+ var hkey registry.Key
+ if proxySettingsPerUser == 0 {
+ hkey = registry.LOCAL_MACHINE
+ } else {
+ hkey = registry.CURRENT_USER
+ }
+
+ k, err = registry.OpenKey(hkey, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
if err != nil {
return
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
index 5b8f4db84a..d4cbfc0bd2 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
@@ -473,7 +473,7 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter
return metas, resp.partial, errors.Wrap(resp.metaErrs.Err(), "incomplete view")
}
- level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
+ level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "duration_ms", time.Since(start).Milliseconds(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
return metas, resp.partial, nil
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go
index 2b6ece295e..851dfa9d98 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/index.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go
@@ -111,12 +111,9 @@ func (i HealthStats) Issue347OutsideChunksErr() error {
return nil
}
-// CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure.
-func (i HealthStats) CriticalErr() error {
- var errMsg []string
-
- if i.OutOfOrderSeries > 0 {
- errMsg = append(errMsg, fmt.Sprintf(
+func (i HealthStats) OutOfOrderChunksErr() error {
+ if i.OutOfOrderChunks > 0 {
+ return errors.New(fmt.Sprintf(
"%d/%d series have an average of %.3f out-of-order chunks: "+
"%.3f of these are exact duplicates (in terms of data and time range)",
i.OutOfOrderSeries,
@@ -125,6 +122,12 @@ func (i HealthStats) CriticalErr() error {
float64(i.DuplicatedChunks)/float64(i.OutOfOrderChunks),
))
}
+ return nil
+}
+
+// CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure.
+func (i HealthStats) CriticalErr() error {
+ var errMsg []string
n := i.OutsideChunks - (i.CompleteOutsideChunks + i.Issue347OutsideChunks)
if n > 0 {
@@ -158,6 +161,10 @@ func (i HealthStats) AnyErr() error {
errMsg = append(errMsg, err.Error())
}
+ if err := i.OutOfOrderChunksErr(); err != nil {
+ errMsg = append(errMsg, err.Error())
+ }
+
if len(errMsg) > 0 {
return errors.New(strings.Join(errMsg, ", "))
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
index 93f1fd88b3..20ebecaaca 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
@@ -17,6 +17,18 @@ import (
"github.com/thanos-io/thanos/pkg/objstore"
)
+// ReaderPoolMetrics holds metrics tracked by ReaderPool.
+type ReaderPoolMetrics struct {
+ lazyReader *LazyBinaryReaderMetrics
+}
+
+// NewReaderPoolMetrics makes new ReaderPoolMetrics.
+func NewReaderPoolMetrics(reg prometheus.Registerer) *ReaderPoolMetrics {
+ return &ReaderPoolMetrics{
+ lazyReader: NewLazyBinaryReaderMetrics(reg),
+ }
+}
+
// ReaderPool is used to istantiate new index-header readers and keep track of them.
// When the lazy reader is enabled, the pool keeps track of all instantiated readers
// and automatically close them once the idle timeout is reached. A closed lazy reader
@@ -24,8 +36,8 @@ import (
type ReaderPool struct {
lazyReaderEnabled bool
lazyReaderIdleTimeout time.Duration
- lazyReaderMetrics *LazyBinaryReaderMetrics
logger log.Logger
+ metrics *ReaderPoolMetrics
// Channel used to signal once the pool is closing.
close chan struct{}
@@ -36,12 +48,12 @@ type ReaderPool struct {
}
// NewReaderPool makes a new ReaderPool.
-func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, reg prometheus.Registerer) *ReaderPool {
+func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, metrics *ReaderPoolMetrics) *ReaderPool {
p := &ReaderPool{
logger: logger,
+ metrics: metrics,
lazyReaderEnabled: lazyReaderEnabled,
lazyReaderIdleTimeout: lazyReaderIdleTimeout,
- lazyReaderMetrics: NewLazyBinaryReaderMetrics(reg),
lazyReaders: make(map[*LazyBinaryReader]struct{}),
close: make(chan struct{}),
}
@@ -73,7 +85,7 @@ func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt
var err error
if p.lazyReaderEnabled {
- reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.lazyReaderMetrics, p.onLazyReaderClosed)
+ reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.onLazyReaderClosed)
} else {
reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling)
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go
index b3c8b9d1f0..f2d40bd045 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go
@@ -67,6 +67,8 @@ const (
// IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424)
// This reason can be ignored when vertical block sharding will be implemented.
IndexSizeExceedingNoCompactReason = "index-size-exceeding"
+	// OutOfOrderChunksNoCompactReason is a reason for excluding from compaction a block whose index contains out-of-order chunks, so that overall compaction is not blocked.
+ OutOfOrderChunksNoCompactReason = "block-index-out-of-order-chunk"
)
// NoCompactMark marker stores reason of block being excluded from compaction if needed.
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go
index 0cbda37e19..f02c09a977 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go
@@ -114,6 +114,7 @@ func (m *Matchers) UnmarshalYAML(value *yaml.Node) (err error) {
type DeletionRequest struct {
Matchers Matchers `json:"matchers" yaml:"matchers"`
Intervals tombstones.Intervals `json:"intervals,omitempty" yaml:"intervals,omitempty"`
+ RequestID string `json:"request_id,omitempty" yaml:"request_id,omitempty"`
}
type File struct {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go
index cb5adcb0d0..02059e8e69 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go
@@ -21,6 +21,7 @@ import (
"gopkg.in/yaml.v2"
"github.com/thanos-io/thanos/pkg/discovery/dns"
+ memcacheDiscovery "github.com/thanos-io/thanos/pkg/discovery/memcache"
"github.com/thanos-io/thanos/pkg/extprom"
"github.com/thanos-io/thanos/pkg/gate"
"github.com/thanos-io/thanos/pkg/model"
@@ -53,6 +54,7 @@ var (
MaxGetMultiConcurrency: 100,
MaxGetMultiBatchSize: 0,
DNSProviderUpdateInterval: 10 * time.Second,
+ AutoDiscovery: false,
}
)
@@ -114,6 +116,9 @@ type MemcachedClientConfig struct {
// DNSProviderUpdateInterval specifies the DNS discovery update interval.
DNSProviderUpdateInterval time.Duration `yaml:"dns_provider_update_interval"`
+
+	// AutoDiscovery configures the memcached client to perform auto-discovery instead of DNS resolution.
+ AutoDiscovery bool `yaml:"auto_discovery"`
}
func (c *MemcachedClientConfig) validate() error {
@@ -153,8 +158,8 @@ type memcachedClient struct {
// Name provides an identifier for the instantiated Client
name string
- // DNS provider used to keep the memcached servers list updated.
- dnsProvider *dns.Provider
+ // Address provider used to keep the memcached servers list updated.
+ addressProvider AddressProvider
// Channel used to notify internal goroutines when they should quit.
stop chan struct{}
@@ -177,6 +182,15 @@ type memcachedClient struct {
dataSize *prometheus.HistogramVec
}
+// AddressProvider performs node address resolution given a list of clusters.
+type AddressProvider interface {
+	// Resolve resolves the provided list of memcached clusters to the actual node addresses.
+ Resolve(context.Context, []string) error
+
+	// Addresses returns the resolved node addresses.
+ Addresses() []string
+}
+
type memcachedGetMultiResult struct {
items map[string]*memcache.Item
err error
@@ -220,20 +234,31 @@ func newMemcachedClient(
reg prometheus.Registerer,
name string,
) (*memcachedClient, error) {
- dnsProvider := dns.NewProvider(
- logger,
- extprom.WrapRegistererWithPrefix("thanos_memcached_", reg),
- dns.GolangResolverType,
- )
+ promRegisterer := extprom.WrapRegistererWithPrefix("thanos_memcached_", reg)
+
+ var addressProvider AddressProvider
+ if config.AutoDiscovery {
+ addressProvider = memcacheDiscovery.NewProvider(
+ logger,
+ promRegisterer,
+ config.Timeout,
+ )
+ } else {
+ addressProvider = dns.NewProvider(
+ logger,
+ extprom.WrapRegistererWithPrefix("thanos_memcached_", reg),
+ dns.MiekgdnsResolverType,
+ )
+ }
c := &memcachedClient{
- logger: log.With(logger, "name", name),
- config: config,
- client: client,
- selector: selector,
- dnsProvider: dnsProvider,
- asyncQueue: make(chan func(), config.MaxAsyncBufferSize),
- stop: make(chan struct{}, 1),
+ logger: log.With(logger, "name", name),
+ config: config,
+ client: client,
+ selector: selector,
+ addressProvider: addressProvider,
+ asyncQueue: make(chan func(), config.MaxAsyncBufferSize),
+ stop: make(chan struct{}, 1),
getMultiGate: gate.New(
extprom.WrapRegistererWithPrefix("thanos_memcached_getmulti_", reg),
config.MaxGetMultiConcurrency,
@@ -561,11 +586,11 @@ func (c *memcachedClient) resolveAddrs() error {
defer cancel()
// If some of the dns resolution fails, log the error.
- if err := c.dnsProvider.Resolve(ctx, c.config.Addresses); err != nil {
+ if err := c.addressProvider.Resolve(ctx, c.config.Addresses); err != nil {
level.Error(c.logger).Log("msg", "failed to resolve addresses for memcached", "addresses", strings.Join(c.config.Addresses, ","), "err", err)
}
// Fail in case no server address is resolved.
- servers := c.dnsProvider.Addresses()
+ servers := c.addressProvider.Addresses()
if len(servers) == 0 {
return fmt.Errorf("no server address resolved for %s", c.name)
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
index a0f62fbc61..547eae57f2 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
@@ -240,6 +240,7 @@ type DefaultGrouper struct {
verticalCompactions *prometheus.CounterVec
garbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
+ blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
}
@@ -252,6 +253,7 @@ func NewDefaultGrouper(
reg prometheus.Registerer,
blocksMarkedForDeletion prometheus.Counter,
garbageCollectedBlocks prometheus.Counter,
+ blocksMarkedForNoCompact prometheus.Counter,
hashFunc metadata.HashFunc,
) *DefaultGrouper {
return &DefaultGrouper{
@@ -279,9 +281,10 @@ func NewDefaultGrouper(
Name: "thanos_compact_group_vertical_compactions_total",
Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.",
}, []string{"group"}),
- garbageCollectedBlocks: garbageCollectedBlocks,
- blocksMarkedForDeletion: blocksMarkedForDeletion,
- hashFunc: hashFunc,
+ blocksMarkedForNoCompact: blocksMarkedForNoCompact,
+ garbageCollectedBlocks: garbageCollectedBlocks,
+ blocksMarkedForDeletion: blocksMarkedForDeletion,
+ hashFunc: hashFunc,
}
}
@@ -309,6 +312,7 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro
g.verticalCompactions.WithLabelValues(groupKey),
g.garbageCollectedBlocks,
g.blocksMarkedForDeletion,
+ g.blocksMarkedForNoCompact,
g.hashFunc,
)
if err != nil {
@@ -346,6 +350,7 @@ type Group struct {
verticalCompactions prometheus.Counter
groupGarbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
+ blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
}
@@ -365,6 +370,7 @@ func NewGroup(
verticalCompactions prometheus.Counter,
groupGarbageCollectedBlocks prometheus.Counter,
blocksMarkedForDeletion prometheus.Counter,
+ blocksMarkedForNoCompact prometheus.Counter,
hashFunc metadata.HashFunc,
) (*Group, error) {
if logger == nil {
@@ -385,6 +391,7 @@ func NewGroup(
verticalCompactions: verticalCompactions,
groupGarbageCollectedBlocks: groupGarbageCollectedBlocks,
blocksMarkedForDeletion: blocksMarkedForDeletion,
+ blocksMarkedForNoCompact: blocksMarkedForNoCompact,
hashFunc: hashFunc,
}
return g, nil
@@ -541,6 +548,26 @@ func IsIssue347Error(err error) bool {
return ok
}
+// OutOfOrderChunksError is a type wrapper for an out-of-order chunk error from validating block index.
+type OutOfOrderChunksError struct {
+ err error
+ id ulid.ULID
+}
+
+func (e OutOfOrderChunksError) Error() string {
+ return e.err.Error()
+}
+
+func outOfOrderChunkError(err error, brokenBlock ulid.ULID) OutOfOrderChunksError {
+ return OutOfOrderChunksError{err: err, id: brokenBlock}
+}
+
+// IsOutOfOrderChunkError returns true if the base error is an OutOfOrderChunksError.
+func IsOutOfOrderChunkError(err error) bool {
+ _, ok := errors.Cause(err).(OutOfOrderChunksError)
+ return ok
+}
+
// HaltError is a type wrapper for errors that should halt any further progress on compactions.
type HaltError struct {
err error
@@ -749,6 +776,10 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels))
}
+ if err := stats.OutOfOrderChunksErr(); err != nil {
+ return false, ulid.ULID{}, outOfOrderChunkError(errors.Wrapf(err, "blocks with out-of-order chunks are dropped from compaction: %s", bdir), meta.ULID)
+ }
+
if err := stats.Issue347OutsideChunksErr(); err != nil {
return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID)
}
@@ -759,7 +790,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
}
toCompactDirs = append(toCompactDirs, bdir)
}
- level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin))
+ level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
begin = time.Now()
compID, err = comp.Compact(dir, toCompactDirs, nil)
@@ -784,7 +815,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
cg.verticalCompactions.Inc()
}
level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID,
- "blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks)
+ "blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "overlapping_blocks", overlappingBlocks)
bdir := filepath.Join(dir, compID.String())
index := filepath.Join(bdir, block.IndexFilename)
@@ -821,7 +852,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
if err := block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc); err != nil {
return false, ulid.ULID{}, retry(errors.Wrapf(err, "upload of %s failed", compID))
}
- level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin))
+ level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
// Mark for deletion the blocks we just compacted from the group and bucket so they do not get included
// into the next planning cycle.
@@ -852,14 +883,15 @@ func (cg *Group) deleteBlock(id ulid.ULID, bdir string) error {
// BucketCompactor compacts blocks in a bucket.
type BucketCompactor struct {
- logger log.Logger
- sy *Syncer
- grouper Grouper
- comp Compactor
- planner Planner
- compactDir string
- bkt objstore.Bucket
- concurrency int
+ logger log.Logger
+ sy *Syncer
+ grouper Grouper
+ comp Compactor
+ planner Planner
+ compactDir string
+ bkt objstore.Bucket
+ concurrency int
+ skipBlocksWithOutOfOrderChunks bool
}
// NewBucketCompactor creates a new bucket compactor.
@@ -872,19 +904,21 @@ func NewBucketCompactor(
compactDir string,
bkt objstore.Bucket,
concurrency int,
+ skipBlocksWithOutOfOrderChunks bool,
) (*BucketCompactor, error) {
if concurrency <= 0 {
return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency)
}
return &BucketCompactor{
- logger: logger,
- sy: sy,
- grouper: grouper,
- planner: planner,
- comp: comp,
- compactDir: compactDir,
- bkt: bkt,
- concurrency: concurrency,
+ logger: logger,
+ sy: sy,
+ grouper: grouper,
+ planner: planner,
+ comp: comp,
+ compactDir: compactDir,
+ bkt: bkt,
+ concurrency: concurrency,
+ skipBlocksWithOutOfOrderChunks: skipBlocksWithOutOfOrderChunks,
}, nil
}
@@ -939,6 +973,23 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) {
continue
}
}
+ // If a block has out-of-order chunks and the compactor has been configured to
+ // skip it, we can mark the block for no compaction so that the next
+ // compaction run will skip it.
+ if IsOutOfOrderChunkError(err) && c.skipBlocksWithOutOfOrderChunks {
+ if err := block.MarkForNoCompact(
+ ctx,
+ c.logger,
+ c.bkt,
+ err.(OutOfOrderChunksError).id,
+ metadata.OutOfOrderChunksNoCompactReason,
+ "OutofOrderChunk: marking block with out-of-order series/chunks to as no compact to unblock compaction", g.blocksMarkedForNoCompact); err == nil {
+ mtx.Lock()
+ finishedAllGroups = false
+ mtx.Unlock()
+ continue
+ }
+ }
errChan <- errors.Wrapf(err, "group %s", g.Key())
return
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/azure.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/azure.go
index 5c0b73bc6e..4b42f2283c 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/azure.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/azure.go
@@ -216,7 +216,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt
for i := 1; ; i++ {
var (
blobPrefixes []blob.BlobPrefix
- blobItems []blob.BlobItem
+ blobItems []blob.BlobItemInternal
)
if params.Recursive {
@@ -294,12 +294,12 @@ func (b *Bucket) getBlobReader(ctx context.Context, name string, offset, length
return nil, errors.New("X-Ms-Error-Code: [BlobNotFound]")
}
- blobURL, err := getBlobURL(ctx, *b.config, name)
+ blobURL := getBlobURL(name, b.containerURL)
if err != nil {
return nil, errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name)
}
var props *blob.BlobGetPropertiesResponse
- props, err = blobURL.GetProperties(ctx, blob.BlobAccessConditions{})
+ props, err = blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{})
if err != nil {
return nil, errors.Wrapf(err, "cannot get properties for container: %s", name)
}
@@ -345,13 +345,9 @@ func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
- blobURL, err := getBlobURL(ctx, *b.config, name)
- if err != nil {
- return objstore.ObjectAttributes{}, errors.Wrapf(err, "cannot get Azure blob URL, blob: %s", name)
- }
+ blobURL := getBlobURL(name, b.containerURL)
- var props *blob.BlobGetPropertiesResponse
- props, err = blobURL.GetProperties(ctx, blob.BlobAccessConditions{})
+ props, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{})
if err != nil {
return objstore.ObjectAttributes{}, err
}
@@ -365,12 +361,9 @@ func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAt
// Exists checks if the given object exists.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
level.Debug(b.logger).Log("msg", "check if blob exists", "blob", name)
- blobURL, err := getBlobURL(ctx, *b.config, name)
- if err != nil {
- return false, errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name)
- }
+ blobURL := getBlobURL(name, b.containerURL)
- if _, err = blobURL.GetProperties(ctx, blob.BlobAccessConditions{}); err != nil {
+ if _, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}); err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
@@ -383,11 +376,9 @@ func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
level.Debug(b.logger).Log("msg", "Uploading blob", "blob", name)
- blobURL, err := getBlobURL(ctx, *b.config, name)
- if err != nil {
- return errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name)
- }
- if _, err = blob.UploadStreamToBlockBlob(ctx, r, blobURL,
+ blobURL := getBlobURL(name, b.containerURL)
+
+ if _, err := blob.UploadStreamToBlockBlob(ctx, r, blobURL,
blob.UploadStreamToBlockBlobOptions{
BufferSize: 3 * 1024 * 1024,
MaxBuffers: 4,
@@ -401,12 +392,9 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
level.Debug(b.logger).Log("msg", "Deleting blob", "blob", name)
- blobURL, err := getBlobURL(ctx, *b.config, name)
- if err != nil {
- return errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name)
- }
+ blobURL := getBlobURL(name, b.containerURL)
- if _, err = blobURL.Delete(ctx, blob.DeleteSnapshotsOptionInclude, blob.BlobAccessConditions{}); err != nil {
+ if _, err := blobURL.Delete(ctx, blob.DeleteSnapshotsOptionInclude, blob.BlobAccessConditions{}); err != nil {
return errors.Wrapf(err, "error deleting blob, address: %s", name)
}
return nil
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go
index 2138175d77..00a2c1e03c 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go
@@ -156,12 +156,8 @@ func createContainer(ctx context.Context, conf Config) (blob.ContainerURL, error
return c, err
}
-func getBlobURL(ctx context.Context, conf Config, blobName string) (blob.BlockBlobURL, error) {
- c, err := getContainerURL(ctx, conf)
- if err != nil {
- return blob.BlockBlobURL{}, err
- }
- return c.NewBlockBlobURL(blobName), nil
+func getBlobURL(blobName string, c blob.ContainerURL) blob.BlockBlobURL {
+ return c.NewBlockBlobURL(blobName)
}
func parseError(errorCode string) string {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go
index 6854f15ca9..c76552c60a 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go
@@ -4,6 +4,7 @@
package objstore
import (
+ "bytes"
"context"
"fmt"
"io/ioutil"
@@ -21,7 +22,7 @@ func CreateTemporaryTestBucketName(t testing.TB) string {
src := rand.NewSource(time.Now().UnixNano())
// Bucket name need to conform: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html.
- name := strings.Replace(strings.Replace(fmt.Sprintf("test_%x_%s", src.Int63(), strings.ToLower(t.Name())), "_", "-", -1), "/", "-", -1)
+ name := strings.ReplaceAll(strings.Replace(fmt.Sprintf("test_%x_%s", src.Int63(), strings.ToLower(t.Name())), "_", "-", -1), "/", "-")
if len(name) >= 63 {
name = name[:63]
}
@@ -245,4 +246,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) {
sort.Strings(expected)
sort.Strings(seen)
testutil.Equals(t, expected, seen)
+
+ testutil.Ok(t, bkt.Upload(ctx, "obj_6.som", bytes.NewReader(make([]byte, 1024*1024*200))))
+ testutil.Ok(t, bkt.Delete(ctx, "obj_6.som"))
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go
index 989c09b6d5..614bf9df68 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go
@@ -26,6 +26,7 @@ import (
"github.com/gogo/status"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/promql"
@@ -175,16 +176,12 @@ func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labe
if err := json.Unmarshal(body, &d); err != nil {
return nil, errors.Wrapf(err, "unmarshal response: %v", string(body))
}
- var cfg struct {
- Global struct {
- ExternalLabels map[string]string `yaml:"external_labels"`
- } `yaml:"global"`
- }
+ var cfg config.Config
if err := yaml.Unmarshal([]byte(d.Data.YAML), &cfg); err != nil {
return nil, errors.Wrapf(err, "parse Prometheus config: %v", d.Data.YAML)
}
- lset := labels.FromMap(cfg.Global.ExternalLabels)
+ lset := cfg.GlobalConfig.ExternalLabels
sort.Sort(lset)
return lset, nil
}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
index fef930ee1d..8255e1dc18 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
@@ -403,7 +403,8 @@ func NewBucketStore(
}
// Depend on the options
- s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, extprom.WrapRegistererWithPrefix("thanos_bucket_store_", s.reg))
+ indexReaderPoolMetrics := indexheader.NewReaderPoolMetrics(extprom.WrapRegistererWithPrefix("thanos_bucket_store_", s.reg))
+ s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics)
s.metrics = newBucketStoreMetrics(s.reg) // TODO(metalmatze): Might be possible via Option too
if err := os.MkdirAll(dir, 0750); err != nil {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
index 7f0e0a8911..9a649436ea 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
@@ -4,13 +4,21 @@
package testutil
import (
+ "context"
"fmt"
+ "math/rand"
"path/filepath"
"reflect"
"runtime"
"runtime/debug"
+ "sort"
"testing"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/index"
+
"github.com/davecgh/go-spew/spew"
"github.com/pkg/errors"
"github.com/pmezard/go-difflib/difflib"
@@ -173,6 +181,8 @@ func TolerantVerifyLeakMain(m *testing.M) {
// https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
+ // https://github.com/baidubce/bce-sdk-go/blob/9a8c1139e6a3ad23080b9b8c51dec88df8ce3cda/util/log/logger.go#L359
+ goleak.IgnoreTopFunction("github.com/baidubce/bce-sdk-go/util/log.NewLogger.func1"),
)
}
@@ -185,6 +195,8 @@ func TolerantVerifyLeak(t *testing.T) {
// https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
+ // https://github.com/baidubce/bce-sdk-go/blob/9a8c1139e6a3ad23080b9b8c51dec88df8ce3cda/util/log/logger.go#L359
+ goleak.IgnoreTopFunction("github.com/baidubce/bce-sdk-go/util/log.NewLogger.func1"),
)
}
@@ -203,3 +215,105 @@ func FaultOrPanicToErr(f func()) (err error) {
return err
}
+
+var indexFilename = "index"
+
+type indexWriterSeries struct {
+ labels labels.Labels
+ chunks []chunks.Meta // series file offset of chunks
+}
+
+type indexWriterSeriesSlice []*indexWriterSeries
+
+// PutOutOfOrderIndex updates the index in blockDir with an index containing an out-of-order chunk
+// copied from https://github.com/prometheus/prometheus/blob/b1ed4a0a663d0c62526312311c7529471abbc565/tsdb/index/index_test.go#L346
+func PutOutOfOrderIndex(blockDir string, minTime int64, maxTime int64) error {
+
+ if minTime >= maxTime || minTime+4 >= maxTime {
+ return fmt.Errorf("minTime must be at least 4 less than maxTime to not create overlapping chunks")
+ }
+
+ lbls := []labels.Labels{
+ []labels.Label{
+ {Name: "lbl1", Value: "1"},
+ },
+ }
+
+ // Sort labels as the index writer expects series in sorted order.
+ sort.Sort(labels.Slice(lbls))
+
+ symbols := map[string]struct{}{}
+ for _, lset := range lbls {
+ for _, l := range lset {
+ symbols[l.Name] = struct{}{}
+ symbols[l.Value] = struct{}{}
+ }
+ }
+
+ var input indexWriterSeriesSlice
+
+ // Generate ChunkMetas for every label set.
+ for _, lset := range lbls {
+ var metas []chunks.Meta
+ // only need two chunks that are out-of-order
+ chk1 := chunks.Meta{
+ MinTime: maxTime - 2,
+ MaxTime: maxTime - 1,
+ Ref: rand.Uint64(),
+ Chunk: chunkenc.NewXORChunk(),
+ }
+ metas = append(metas, chk1)
+ chk2 := chunks.Meta{
+ MinTime: minTime + 1,
+ MaxTime: minTime + 2,
+ Ref: rand.Uint64(),
+ Chunk: chunkenc.NewXORChunk(),
+ }
+ metas = append(metas, chk2)
+
+ input = append(input, &indexWriterSeries{
+ labels: lset,
+ chunks: metas,
+ })
+ }
+
+ iw, err := index.NewWriter(context.Background(), filepath.Join(blockDir, indexFilename))
+ if err != nil {
+ return err
+ }
+
+ syms := []string{}
+ for s := range symbols {
+ syms = append(syms, s)
+ }
+ sort.Strings(syms)
+ for _, s := range syms {
+ if err := iw.AddSymbol(s); err != nil {
+ return err
+ }
+ }
+
+ // Population procedure as done by compaction.
+ var (
+ postings = index.NewMemPostings()
+ values = map[string]map[string]struct{}{}
+ )
+
+ for i, s := range input {
+ if err := iw.AddSeries(uint64(i), s.labels, s.chunks...); err != nil {
+ return err
+ }
+
+ for _, l := range s.labels {
+ valset, ok := values[l.Name]
+ if !ok {
+ valset = map[string]struct{}{}
+ values[l.Name] = valset
+ }
+ valset[l.Value] = struct{}{}
+ }
+ postings.Add(uint64(i), s.labels)
+ }
+
+ return iw.Close()
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8c76fc7b35..3cc227e6d5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -16,10 +16,10 @@ cloud.google.com/go/bigtable/internal/option
# cloud.google.com/go/storage v1.10.0
## explicit
cloud.google.com/go/storage
-# github.com/Azure/azure-pipeline-go v0.2.2
+# github.com/Azure/azure-pipeline-go v0.2.3
## explicit
github.com/Azure/azure-pipeline-go/pipeline
-# github.com/Azure/azure-storage-blob-go v0.8.0
+# github.com/Azure/azure-storage-blob-go v0.13.0
## explicit
github.com/Azure/azure-storage-blob-go/azblob
# github.com/Azure/go-autorest v14.2.0+incompatible
@@ -387,7 +387,7 @@ github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
# github.com/mattn/go-colorable v0.1.6
github.com/mattn/go-colorable
-# github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe
+# github.com/mattn/go-ieproxy v0.0.1
github.com/mattn/go-ieproxy
# github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty
@@ -605,7 +605,7 @@ github.com/stretchr/objx
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
-# github.com/thanos-io/thanos v0.22.0
+# github.com/thanos-io/thanos v0.19.1-0.20210827151736-fdfc0776d0c3
## explicit
github.com/thanos-io/thanos/pkg/block
github.com/thanos-io/thanos/pkg/block/indexheader
@@ -619,6 +619,7 @@ github.com/thanos-io/thanos/pkg/discovery/cache
github.com/thanos-io/thanos/pkg/discovery/dns
github.com/thanos-io/thanos/pkg/discovery/dns/godns
github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns
+github.com/thanos-io/thanos/pkg/discovery/memcache
github.com/thanos-io/thanos/pkg/errutil
github.com/thanos-io/thanos/pkg/exemplars/exemplarspb
github.com/thanos-io/thanos/pkg/extprom