Certification tests for Azure Blob Storage Binding (#1435)
* initial blobstorage output binding certification Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * More blobstorage tests Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Rename some remnants from copy paste Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Blobstorage Binding - test file upload Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Blobstorage Output Binding Remove Pascal case Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Blobstorage Output Binding: Remove list from conf test Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Azure Blobstorage Binding - include name upon creation Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * blobstore binding - verify encoding option Signed-off-by: GitHub <noreply@github.com> Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * go mod tidy Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Additional test cases Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * list blob tests WIP Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * More stable certification tests for AzBlob Binding Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Remaining Blob Storage Binding certification tests Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Lint blob storage certification tests Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * update go.mods Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Fix license 
header Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * use byte string instead of file Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> * Run make modtidy-all again Signed-off-by: Bernd Verst <4535280+berndverst@users.noreply.github.com> Co-authored-by: Artur Souza <artursouza.ms@outlook.com>
This commit is contained in:
parent
2d52913aa9
commit
0f2398670a
|
|
@ -24,3 +24,14 @@ resource storageAccount 'Microsoft.Storage/storageAccounts@2021-02-01' = {
|
|||
location: rgLocation
|
||||
tags: confTestTags
|
||||
}
|
||||
|
||||
resource blobServices 'Microsoft.Storage/storageAccounts/blobServices@2021-02-01' = {
|
||||
parent: storageAccount
|
||||
name: 'default'
|
||||
properties: {
|
||||
deleteRetentionPolicy: {
|
||||
enabled: true
|
||||
days: 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -311,7 +311,7 @@ echo "INFO: SQL_SERVER_NAME=${SQL_SERVER_NAME}"
|
|||
SQL_SERVER_ADMIN_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.sqlServerAdminName.value" --output tsv)"
|
||||
echo "INFO: SQL_SERVER_ADMIN_NAME=${SQL_SERVER_ADMIN_NAME}"
|
||||
AZURE_CONTAINER_REGISTRY_NAME="$(az deployment sub show --name "${DEPLOY_NAME}" --query "properties.outputs.acrName.value" --output tsv)"
|
||||
echo "INFO: AZURE_CONTAINER_REGISTRY_NAME=${CONTAINER_REGISTRY_NAME}"
|
||||
echo "INFO: AZURE_CONTAINER_REGISTRY_NAME=${AZURE_CONTAINER_REGISTRY_NAME}"
|
||||
|
||||
# Give the service principal used by the SDK write access to the entire resource group
|
||||
MSYS_NO_PATHCONV=1 az role assignment create --assignee "${SDK_AUTH_SP_ID}" --role "Contributor" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}"
|
||||
|
|
@ -646,6 +646,7 @@ CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID="$(az ad sp list --display-name "${CERTIFIC
|
|||
|
||||
# Give the service principal used for certification test access to the relevant data plane resources
|
||||
az cosmosdb sql role assignment create --account-name ${COSMOS_DB_NAME} --resource-group "${RESOURCE_GROUP_NAME}" --role-definition-name "Cosmos DB Built-in Data Contributor" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.DocumentDB/databaseAccounts/${COSMOS_DB_NAME}" --principal-id "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}"
|
||||
az role assignment create --assignee "${CERTIFICATION_SPAUTH_SP_PRINCIPAL_ID}" --role "Storage Blob Data Contributor" --scope "/subscriptions/${SUB_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.Storage/storageAccounts/${STORAGE_NAME}/blobServices/default/containers/${STORAGE_CONTAINER_NAME}"
|
||||
|
||||
# Now export the service principal information
|
||||
CERTIFICATION_TENANT_ID="$(az ad sp list --display-name "${CERTIFICATION_SPAUTH_SP_NAME}" --query "[].appOwnerTenantId" --output tsv)"
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/google/uuid"
|
||||
|
||||
azauth "github.com/dapr/components-contrib/authentication/azure"
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
"github.com/dapr/kit/logger"
|
||||
)
|
||||
|
|
@ -52,24 +53,15 @@ const (
|
|||
metadataKeyContentEncoding = "contentEncoding"
|
||||
metadataKeyContentLanguage = "contentLanguage"
|
||||
metadataKeyContentDisposition = "contentDisposition"
|
||||
meatdataKeyCacheControl = "cacheControl"
|
||||
metadataKeyCacheControl = "cacheControl"
|
||||
// Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader. A value
|
||||
// of zero means that no additional HTTP GET requests will be made.
|
||||
defaultGetBlobRetryCount = 10
|
||||
// Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not
|
||||
// specify maxresults the server will return up to 5,000 items.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs#uri-parameters
|
||||
maxResults = 5000
|
||||
|
||||
// TODO: remove the pascal case support when the component moves to GA
|
||||
// See: https://github.com/dapr/components-contrib/pull/999#issuecomment-876890210
|
||||
metadataKeyContentTypeBC = "ContentType"
|
||||
metadataKeyContentMD5BC = "ContentMD5"
|
||||
metadataKeyContentEncodingBC = "ContentEncoding"
|
||||
metadataKeyContentLanguageBC = "ContentLanguage"
|
||||
metadataKeyContentDispositionBC = "ContentDisposition"
|
||||
metadataKeyCacheControlBC = "CacheControl"
|
||||
metadataKeyDeleteSnapshotOptionsBC = "DeleteSnapshotOptions"
|
||||
maxResults = 5000
|
||||
endpointKey = "endpoint"
|
||||
)
|
||||
|
||||
var ErrMissingBlobName = errors.New("blobName is a required attribute")
|
||||
|
|
@ -92,7 +84,8 @@ type blobStorageMetadata struct {
|
|||
}
|
||||
|
||||
type createResponse struct {
|
||||
BlobURL string `json:"blobURL"`
|
||||
BlobURL string `json:"blobURL"`
|
||||
BlobName string `json:"blobName"`
|
||||
}
|
||||
|
||||
type listInclude struct {
|
||||
|
|
@ -123,9 +116,12 @@ func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error {
|
|||
}
|
||||
a.metadata = m
|
||||
|
||||
credential, err := azblob.NewSharedKeyCredential(m.StorageAccount, m.StorageAccessKey)
|
||||
if m.StorageAccessKey != "" {
|
||||
metadata.Properties["accountKey"] = m.StorageAccessKey
|
||||
}
|
||||
credential, env, err := azauth.GetAzureStorageCredentials(a.logger, m.StorageAccount, metadata.Properties)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid credentials with error: %w", err)
|
||||
return fmt.Errorf("invalid credentials with error: %s", err.Error())
|
||||
}
|
||||
|
||||
userAgent := "dapr-" + logger.DaprVersion
|
||||
|
|
@ -134,10 +130,18 @@ func (a *AzureBlobStorage) Init(metadata bindings.Metadata) error {
|
|||
}
|
||||
p := azblob.NewPipeline(credential, options)
|
||||
|
||||
containerName := a.metadata.Container
|
||||
URL, _ := url.Parse(
|
||||
fmt.Sprintf("https://%s.blob.core.windows.net/%s", m.StorageAccount, containerName))
|
||||
containerURL := azblob.NewContainerURL(*URL, p)
|
||||
var containerURL azblob.ContainerURL
|
||||
customEndpoint, ok := metadata.Properties[endpointKey]
|
||||
if ok && customEndpoint != "" {
|
||||
URL, parseErr := url.Parse(fmt.Sprintf("%s/%s/%s", customEndpoint, m.StorageAccount, m.Container))
|
||||
if parseErr != nil {
|
||||
return parseErr
|
||||
}
|
||||
containerURL = azblob.NewContainerURL(*URL, p)
|
||||
} else {
|
||||
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.%s/%s", m.StorageAccount, env.StorageEndpointSuffix, m.Container))
|
||||
containerURL = azblob.NewContainerURL(*URL, p)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = containerURL.Create(ctx, azblob.Metadata{}, m.PublicAccessLevel)
|
||||
|
|
@ -165,6 +169,10 @@ func (a *AzureBlobStorage) parseMetadata(metadata bindings.Metadata) (*blobStora
|
|||
m.GetBlobRetryCount = defaultGetBlobRetryCount
|
||||
}
|
||||
|
||||
// per the Dapr documentation "none" is a valid value
|
||||
if m.PublicAccessLevel == "none" {
|
||||
m.PublicAccessLevel = ""
|
||||
}
|
||||
if !a.isValidPublicAccessType(m.PublicAccessLevel) {
|
||||
return nil, fmt.Errorf("invalid public access level: %s; allowed: %s",
|
||||
m.PublicAccessLevel, azblob.PossiblePublicAccessTypeValues())
|
||||
|
|
@ -185,12 +193,14 @@ func (a *AzureBlobStorage) Operations() []bindings.OperationKind {
|
|||
func (a *AzureBlobStorage) create(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
|
||||
var blobHTTPHeaders azblob.BlobHTTPHeaders
|
||||
var blobURL azblob.BlockBlobURL
|
||||
var blobName string
|
||||
if val, ok := req.Metadata[metadataKeyBlobName]; ok && val != "" {
|
||||
blobURL = a.getBlobURL(val)
|
||||
blobName = val
|
||||
delete(req.Metadata, metadataKeyBlobName)
|
||||
} else {
|
||||
blobURL = a.getBlobURL(uuid.New().String())
|
||||
blobName = uuid.New().String()
|
||||
}
|
||||
blobURL = a.getBlobURL(blobName)
|
||||
|
||||
if val, ok := req.Metadata[metadataKeyContentType]; ok && val != "" {
|
||||
blobHTTPHeaders.ContentType = val
|
||||
|
|
@ -216,9 +226,9 @@ func (a *AzureBlobStorage) create(req *bindings.InvokeRequest) (*bindings.Invoke
|
|||
blobHTTPHeaders.ContentDisposition = val
|
||||
delete(req.Metadata, metadataKeyContentDisposition)
|
||||
}
|
||||
if val, ok := req.Metadata[meatdataKeyCacheControl]; ok && val != "" {
|
||||
if val, ok := req.Metadata[metadataKeyCacheControl]; ok && val != "" {
|
||||
blobHTTPHeaders.CacheControl = val
|
||||
delete(req.Metadata, meatdataKeyCacheControl)
|
||||
delete(req.Metadata, metadataKeyCacheControl)
|
||||
}
|
||||
|
||||
d, err := strconv.Unquote(string(req.Data))
|
||||
|
|
@ -251,8 +261,13 @@ func (a *AzureBlobStorage) create(req *bindings.InvokeRequest) (*bindings.Invoke
|
|||
return nil, fmt.Errorf("error marshalling create response for azure blob: %w", err)
|
||||
}
|
||||
|
||||
createResponseMetadata := map[string]string{
|
||||
"blobName": blobName,
|
||||
}
|
||||
|
||||
return &bindings.InvokeResponse{
|
||||
Data: b,
|
||||
Data: b,
|
||||
Metadata: createResponseMetadata,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -324,30 +339,36 @@ func (a *AzureBlobStorage) delete(req *bindings.InvokeRequest) (*bindings.Invoke
|
|||
func (a *AzureBlobStorage) list(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
|
||||
options := azblob.ListBlobsSegmentOptions{}
|
||||
|
||||
hasPayload := false
|
||||
var payload listPayload
|
||||
err := json.Unmarshal(req.Data, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if req.Data != nil {
|
||||
err := json.Unmarshal(req.Data, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasPayload = true
|
||||
}
|
||||
|
||||
options.Details.Copy = payload.Include.Copy
|
||||
options.Details.Metadata = payload.Include.Metadata
|
||||
options.Details.Snapshots = payload.Include.Snapshots
|
||||
options.Details.UncommittedBlobs = payload.Include.UncommittedBlobs
|
||||
options.Details.Deleted = payload.Include.Deleted
|
||||
if hasPayload {
|
||||
options.Details.Copy = payload.Include.Copy
|
||||
options.Details.Metadata = payload.Include.Metadata
|
||||
options.Details.Snapshots = payload.Include.Snapshots
|
||||
options.Details.UncommittedBlobs = payload.Include.UncommittedBlobs
|
||||
options.Details.Deleted = payload.Include.Deleted
|
||||
}
|
||||
|
||||
if payload.MaxResults != int32(0) {
|
||||
if hasPayload && payload.MaxResults != int32(0) {
|
||||
options.MaxResults = payload.MaxResults
|
||||
} else {
|
||||
options.MaxResults = maxResults
|
||||
}
|
||||
|
||||
if payload.Prefix != "" {
|
||||
if hasPayload && payload.Prefix != "" {
|
||||
options.Prefix = payload.Prefix
|
||||
}
|
||||
|
||||
var initialMarker azblob.Marker
|
||||
if payload.Marker != "" {
|
||||
if hasPayload && payload.Marker != "" {
|
||||
initialMarker = azblob.Marker{Val: &payload.Marker}
|
||||
} else {
|
||||
initialMarker = azblob.Marker{}
|
||||
|
|
@ -358,7 +379,7 @@ func (a *AzureBlobStorage) list(req *bindings.InvokeRequest) (*bindings.InvokeRe
|
|||
ctx := context.Background()
|
||||
for currentMaker := initialMarker; currentMaker.NotDone(); {
|
||||
var listBlob *azblob.ListBlobsFlatSegmentResponse
|
||||
listBlob, err = a.containerURL.ListBlobsFlatSegment(ctx, currentMaker, options)
|
||||
listBlob, err := a.containerURL.ListBlobsFlatSegment(ctx, currentMaker, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing blobs: %w", err)
|
||||
}
|
||||
|
|
@ -389,8 +410,6 @@ func (a *AzureBlobStorage) list(req *bindings.InvokeRequest) (*bindings.InvokeRe
|
|||
}
|
||||
|
||||
func (a *AzureBlobStorage) Invoke(req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
|
||||
req.Metadata = a.handleBackwardCompatibilityForMetadata(req.Metadata)
|
||||
|
||||
switch req.Operation {
|
||||
case bindings.CreateOperation:
|
||||
return a.create(req)
|
||||
|
|
@ -432,44 +451,3 @@ func (a *AzureBlobStorage) isValidDeleteSnapshotsOptionType(accessType azblob.De
|
|||
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: remove the pascal case support when the component moves to GA
|
||||
// See: https://github.com/dapr/components-contrib/pull/999#issuecomment-876890210
|
||||
func (a *AzureBlobStorage) handleBackwardCompatibilityForMetadata(metadata map[string]string) map[string]string {
|
||||
if val, ok := metadata[metadataKeyContentTypeBC]; ok && val != "" {
|
||||
metadata[metadataKeyContentType] = val
|
||||
delete(metadata, metadataKeyContentTypeBC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyContentMD5BC]; ok && val != "" {
|
||||
metadata[metadataKeyContentMD5] = val
|
||||
delete(metadata, metadataKeyContentMD5BC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyContentEncodingBC]; ok && val != "" {
|
||||
metadata[metadataKeyContentEncoding] = val
|
||||
delete(metadata, metadataKeyContentEncodingBC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyContentLanguageBC]; ok && val != "" {
|
||||
metadata[metadataKeyContentLanguage] = val
|
||||
delete(metadata, metadataKeyContentLanguageBC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyContentDispositionBC]; ok && val != "" {
|
||||
metadata[metadataKeyContentDisposition] = val
|
||||
delete(metadata, metadataKeyContentDispositionBC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyCacheControlBC]; ok && val != "" {
|
||||
metadata[meatdataKeyCacheControl] = val
|
||||
delete(metadata, metadataKeyCacheControlBC)
|
||||
}
|
||||
|
||||
if val, ok := metadata[metadataKeyDeleteSnapshotOptionsBC]; ok && val != "" {
|
||||
metadata[metadataKeyDeleteSnapshots] = val
|
||||
delete(metadata, metadataKeyDeleteSnapshotOptionsBC)
|
||||
}
|
||||
|
||||
return metadata
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,44 @@
|
|||
# Azure Blobstorage Binding certification testing
|
||||
|
||||
This project aims to test the Azure Blobstorage binding component under various conditions.
|
||||
|
||||
## Test plan
|
||||
|
||||
### Authentication tests
|
||||
|
||||
* Authenticate with Azure Active Directory using Service Principal Client Secret
|
||||
* Authenticate with Storage Account Key
|
||||
|
||||
### Functional tests
|
||||
|
||||
- Delete request:
|
||||
- Successful deletion
|
||||
- File does not exist
|
||||
- Snapshots only
|
||||
- Base blob with its snapshots
|
||||
|
||||
- Get request:
|
||||
- Successful Get Request
|
||||
- Item does not exist
|
||||
|
||||
- Create request:
|
||||
- No filename specified
|
||||
- Filename specified
|
||||
- Correct MD5 Hash Provided
|
||||
- Incorrect MD5 hash specified
|
||||
- Existing file name specified (overwrites content)
|
||||
- Creating a public blob (depending on container setting)
|
||||
- Verifies automatic base64 decode option
|
||||
|
||||
- List request:
|
||||
- include custom metadata
|
||||
- include soft-deleted blobs
|
||||
- include snapshots
|
||||
- specify max results and use marker to retrieve more results
|
||||
- filter results by specifying a prefix
|
||||
|
||||
### Running the tests
|
||||
|
||||
This must be run in the GitHub Actions Workflow configured for test infrastructure setup.
|
||||
|
||||
If you have access to an Azure subscription you can run this locally on Mac or Linux after running `setup-azure-conf-test.sh` in `.github/infrastructure/conformance/azure` and then sourcing the generated bash rc file.
|
||||
|
|
@ -0,0 +1,729 @@
|
|||
/*
|
||||
Copyright 2022 The Dapr Authors
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azureblobstoragebinding_test
|
||||
|
||||
import (
|
||||
"crypto/md5" // nolint:gosec
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/dapr/components-contrib/bindings"
|
||||
"github.com/dapr/components-contrib/bindings/azure/blobstorage"
|
||||
"github.com/dapr/components-contrib/secretstores"
|
||||
secretstore_env "github.com/dapr/components-contrib/secretstores/local/env"
|
||||
bindings_loader "github.com/dapr/dapr/pkg/components/bindings"
|
||||
secretstores_loader "github.com/dapr/dapr/pkg/components/secretstores"
|
||||
"github.com/dapr/dapr/pkg/runtime"
|
||||
dapr_testing "github.com/dapr/dapr/pkg/testing"
|
||||
daprsdk "github.com/dapr/go-sdk/client"
|
||||
"github.com/dapr/kit/logger"
|
||||
|
||||
"github.com/dapr/components-contrib/tests/certification/embedded"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow"
|
||||
"github.com/dapr/components-contrib/tests/certification/flow/sidecar"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
||||
)
|
||||
|
||||
const (
|
||||
sidecarName = "blobstorage-sidecar"
|
||||
)
|
||||
|
||||
// getBlobRequest is used to make a common binding request for the get operation.
|
||||
func getBlobRequest(ctx flow.Context, client daprsdk.Client, name string, includeMetadata bool) (out *daprsdk.BindingEvent, err error) {
|
||||
fetchMetdata := fmt.Sprint(includeMetadata)
|
||||
invokeGetMetadata := map[string]string{
|
||||
"blobName": name,
|
||||
"includeMetadata": fetchMetdata,
|
||||
}
|
||||
|
||||
invokeGetRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "get",
|
||||
Data: nil,
|
||||
Metadata: invokeGetMetadata,
|
||||
}
|
||||
|
||||
out, invokeGetErr := client.InvokeBinding(ctx, invokeGetRequest)
|
||||
if invokeGetErr != nil {
|
||||
return nil, fmt.Errorf("%w", invokeGetErr)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// listBlobRequest is used to make a common binding request for the list operation.
|
||||
func listBlobRequest(ctx flow.Context, client daprsdk.Client, prefix string, marker string, maxResults int, includeMetadata bool, includeSnapshots bool, includeUncommittedBlobs bool, includeCopy bool, includeDeleted bool) (out *daprsdk.BindingEvent, err error) {
|
||||
requestOptions := make(map[string]interface{})
|
||||
|
||||
requestOptions["prefix"] = prefix
|
||||
requestOptions["marker"] = marker
|
||||
if maxResults > -1 {
|
||||
requestOptions["maxResults"] = maxResults
|
||||
}
|
||||
includeOptions := make(map[string]interface{})
|
||||
includeOptions["Snapshots"] = includeSnapshots
|
||||
includeOptions["UncommittedBlobs"] = includeUncommittedBlobs
|
||||
includeOptions["Copy"] = includeCopy
|
||||
includeOptions["Deleted"] = includeDeleted
|
||||
includeOptions["Metadata"] = includeMetadata
|
||||
requestOptions["Include"] = includeOptions
|
||||
|
||||
optionsBytes, marshalErr := json.Marshal(requestOptions)
|
||||
if marshalErr != nil {
|
||||
return nil, fmt.Errorf("%w", marshalErr)
|
||||
}
|
||||
|
||||
invokeRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "list",
|
||||
Data: optionsBytes,
|
||||
Metadata: nil,
|
||||
}
|
||||
|
||||
out, invokeErr := client.InvokeBinding(ctx, invokeRequest)
|
||||
if invokeErr != nil {
|
||||
return nil, fmt.Errorf("%w", invokeErr)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// deleteBlobRequest is used to make a common binding request for the delete operation.
|
||||
func deleteBlobRequest(ctx flow.Context, client daprsdk.Client, name string, deleteSnapshotsOption string) (out *daprsdk.BindingEvent, err error) {
|
||||
invokeDeleteMetadata := map[string]string{
|
||||
"blobName": name,
|
||||
"deleteSnapshots": deleteSnapshotsOption,
|
||||
}
|
||||
|
||||
invokeGetRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "delete",
|
||||
Data: nil,
|
||||
Metadata: invokeDeleteMetadata,
|
||||
}
|
||||
|
||||
out, invokeDeleteErr := client.InvokeBinding(ctx, invokeGetRequest)
|
||||
if invokeDeleteErr != nil {
|
||||
return nil, fmt.Errorf("%w", invokeDeleteErr)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func TestBlobStorage(t *testing.T) {
|
||||
ports, err := dapr_testing.GetFreePorts(2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
currentGRPCPort := ports[0]
|
||||
currentHTTPPort := ports[1]
|
||||
|
||||
log := logger.NewLogger("dapr.components")
|
||||
|
||||
testCreateBlobWithFileNameConflict := func(ctx flow.Context) error {
|
||||
// verifies that overwriting a blob with the same name will not cause a conflict.
|
||||
client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
|
||||
if clientErr != nil {
|
||||
panic(clientErr)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
input := "some example content"
|
||||
dataBytes := []byte(input)
|
||||
|
||||
invokeCreateMetadata := map[string]string{
|
||||
"contentType": "text/plain",
|
||||
}
|
||||
|
||||
invokeCreateRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "create",
|
||||
Data: dataBytes,
|
||||
Metadata: invokeCreateMetadata,
|
||||
}
|
||||
|
||||
out, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
|
||||
assert.NoError(t, invokeCreateErr)
|
||||
|
||||
blobName := out.Metadata["blobName"]
|
||||
res, _ := getBlobRequest(ctx, client, blobName, false)
|
||||
oldString := string(res.Data)
|
||||
|
||||
input2 := "some other example content"
|
||||
dataBytes2 := []byte(input2)
|
||||
|
||||
invokeCreateMetadata2 := map[string]string{
|
||||
"blobName": blobName,
|
||||
"contentType": "text/plain",
|
||||
}
|
||||
|
||||
invokeCreateRequest2 := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "create",
|
||||
Data: dataBytes2,
|
||||
Metadata: invokeCreateMetadata2,
|
||||
}
|
||||
_, invokeCreateErr2 := client.InvokeBinding(ctx, invokeCreateRequest2)
|
||||
|
||||
assert.NoError(t, invokeCreateErr2)
|
||||
|
||||
res2, _ := getBlobRequest(ctx, client, blobName, false)
|
||||
newString := string(res2.Data)
|
||||
|
||||
assert.NotEqual(t, oldString, newString)
|
||||
assert.Equal(t, newString, input2)
|
||||
|
||||
// cleanup.
|
||||
out, invokeDeleteErr := deleteBlobRequest(ctx, client, blobName, "")
|
||||
assert.NoError(t, invokeDeleteErr)
|
||||
assert.Empty(t, out.Data)
|
||||
|
||||
// confirm the deletion.
|
||||
_, invokeSecondGetErr := getBlobRequest(ctx, client, blobName, false)
|
||||
assert.Error(t, invokeSecondGetErr)
|
||||
assert.Contains(t, invokeSecondGetErr.Error(), "ServiceCode=BlobNotFound")
|
||||
|
||||
// deleting the key again should fail.
|
||||
_, invokeDeleteErr2 := deleteBlobRequest(ctx, client, blobName, "")
|
||||
assert.Error(t, invokeDeleteErr2)
|
||||
assert.Contains(t, invokeDeleteErr2.Error(), "ServiceCode=BlobNotFound")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
testCreateBlobInvalidContentHash := func(ctx flow.Context) error {
|
||||
// verifies that the content hash is validated.
|
||||
client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
|
||||
if clientErr != nil {
|
||||
panic(clientErr)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
input := "some example content"
|
||||
dataBytes := []byte(input)
|
||||
wrongBytesForContentHash := []byte("wrong content to hash")
|
||||
h := md5.New() // nolint:gosec
|
||||
h.Write(wrongBytesForContentHash)
|
||||
md5HashBase64 := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
|
||||
invokeCreateMetadata := map[string]string{
|
||||
"contentMD5": md5HashBase64,
|
||||
}
|
||||
|
||||
invokeCreateRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "create",
|
||||
Data: dataBytes,
|
||||
Metadata: invokeCreateMetadata,
|
||||
}
|
||||
|
||||
_, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
|
||||
assert.Error(t, invokeCreateErr)
|
||||
assert.Contains(t, invokeCreateErr.Error(), "ServiceCode=Md5Mismatch")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
testCreateBlobFromFile := func(isBase64 bool) func(ctx flow.Context) error {
|
||||
// uploads a blob from a file and optionally verifies the automatic base64 decoding option.
|
||||
return func(ctx flow.Context) error {
|
||||
client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
|
||||
if clientErr != nil {
|
||||
panic(clientErr)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
dataBytes := []byte("somecontent")
|
||||
if isBase64 {
|
||||
dataBytes = []byte(base64.StdEncoding.EncodeToString(dataBytes))
|
||||
}
|
||||
|
||||
invokeCreateRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "create",
|
||||
Data: dataBytes,
|
||||
Metadata: nil,
|
||||
}
|
||||
|
||||
out, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
|
||||
assert.NoError(t, invokeCreateErr)
|
||||
|
||||
blobName := out.Metadata["blobName"]
|
||||
|
||||
out, invokeGetErr := getBlobRequest(ctx, client, blobName, false)
|
||||
assert.NoError(t, invokeGetErr)
|
||||
responseData := out.Data
|
||||
if isBase64 {
|
||||
// input was automatically base64 decoded.
|
||||
// for comparison we will base64 encode the response data.
|
||||
responseData = []byte(base64.StdEncoding.EncodeToString(out.Data))
|
||||
}
|
||||
assert.Equal(t, responseData, dataBytes)
|
||||
assert.Empty(t, out.Metadata)
|
||||
|
||||
out, invokeDeleteErr := deleteBlobRequest(ctx, client, blobName, "")
|
||||
assert.NoError(t, invokeDeleteErr)
|
||||
assert.Empty(t, out.Data)
|
||||
|
||||
// confirm the deletion.
|
||||
_, invokeSecondGetErr := getBlobRequest(ctx, client, blobName, false)
|
||||
assert.Error(t, invokeSecondGetErr)
|
||||
assert.Contains(t, invokeSecondGetErr.Error(), "ServiceCode=BlobNotFound")
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
testCreatePublicBlob := func(shoudBePublic bool, containerName string) func(ctx flow.Context) error {
|
||||
// creates a blob and verifies whether it is public or not,
|
||||
// this depends on how the binding and the backing blob container are configured.
|
||||
return func(ctx flow.Context) error {
|
||||
client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
|
||||
if clientErr != nil {
|
||||
panic(clientErr)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
inputBytes := []byte("this is a public blob")
|
||||
invokeCreateRequest := &daprsdk.InvokeBindingRequest{
|
||||
Name: "azure-blobstorage-output",
|
||||
Operation: "create",
|
||||
Data: inputBytes,
|
||||
Metadata: map[string]string{},
|
||||
}
|
||||
|
||||
out, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
|
||||
assert.NoError(t, invokeCreateErr)
|
||||
|
||||
blobName := out.Metadata["blobName"]
|
||||
storageAccountName := os.Getenv("AzureBlobStorageAccount")
|
||||
if containerName == "" {
|
||||
containerName = os.Getenv("AzureBlobStorageContainer")
|
||||
}
|
||||
|
||||
// verify the blob is public via http request.
|
||||
url := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", storageAccountName, containerName, blobName)
|
||||
resp, httpErr := http.Get(url) // nolint:gosec
|
||||
assert.NoError(t, httpErr)
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
|
||||
if shoudBePublic {
|
||||
assert.Less(t, resp.StatusCode, 400)
|
||||
assert.Equal(t, inputBytes, body)
|
||||
} else {
|
||||
assert.Greater(t, resp.StatusCode, 399)
|
||||
}
|
||||
|
||||
// cleanup.
|
||||
_, invokeDeleteErr := deleteBlobRequest(ctx, client, blobName, "")
|
||||
assert.NoError(t, invokeDeleteErr)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testCreateGetListDelete exercises the basic lifecycle of a blob through the
// output binding: create it with a full set of content metadata, get it back
// (including custom metadata), find it via the list operation and verify the
// echoed properties, delete it, and finally confirm a subsequent get fails
// with BlobNotFound.
testCreateGetListDelete := func(ctx flow.Context) error {
	client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
	if clientErr != nil {
		panic(clientErr)
	}
	defer client.Close()

	input := "some example content"
	dataBytes := []byte(input)
	// MD5 is what the Blob Storage API expects for the contentMD5 header;
	// it is used for transport integrity only, not for security.
	h := md5.New() // nolint:gosec
	h.Write(dataBytes)
	md5HashBase64 := base64.StdEncoding.EncodeToString(h.Sum(nil))

	invokeCreateMetadata := map[string]string{
		"blobName":           "filename.txt",
		"contentType":        "text/plain",
		"contentMD5":         md5HashBase64,
		"contentEncoding":    "UTF-8",
		"contentLanguage":    "en-us",
		"contentDisposition": "attachment",
		"cacheControl":       "no-cache",
		"custom":             "hello-world",
	}

	invokeCreateRequest := &daprsdk.InvokeBindingRequest{
		Name:      "azure-blobstorage-output",
		Operation: "create",
		Data:      dataBytes,
		Metadata:  invokeCreateMetadata,
	}

	_, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
	assert.NoError(t, invokeCreateErr)

	invokeGetMetadata := map[string]string{
		"blobName":        "filename.txt",
		"includeMetadata": "true",
	}

	invokeGetRequest := &daprsdk.InvokeBindingRequest{
		Name:      "azure-blobstorage-output",
		Operation: "get",
		Data:      nil,
		Metadata:  invokeGetMetadata,
	}

	out, invokeGetErr := client.InvokeBinding(ctx, invokeGetRequest)
	assert.NoError(t, invokeGetErr)
	// NOTE: testify's assert.Equal takes (t, expected, actual).
	assert.Equal(t, input, string(out.Data))
	assert.Contains(t, out.Metadata, "custom")
	assert.Equal(t, "hello-world", out.Metadata["custom"])

	out, invokeErr := listBlobRequest(ctx, client, "", "", -1, true, false, false, false, false)
	assert.NoError(t, invokeErr)
	var output []map[string]interface{}
	unmarshalErr := json.Unmarshal(out.Data, &output)
	assert.NoError(t, unmarshalErr)

	found := false
	for _, item := range output {
		if item["Name"] == "filename.txt" {
			found = true
			properties, ok := item["Properties"].(map[string]interface{})
			assert.True(t, ok)
			// The list operation must echo back the content properties that
			// were set at creation time.
			assert.Equal(t, invokeCreateMetadata["contentMD5"], properties["ContentMD5"])
			assert.Equal(t, invokeCreateMetadata["contentType"], properties["ContentType"])
			assert.Equal(t, invokeCreateMetadata["cacheControl"], properties["CacheControl"])
			assert.Equal(t, invokeCreateMetadata["contentDisposition"], properties["ContentDisposition"])
			assert.Equal(t, invokeCreateMetadata["contentEncoding"], properties["ContentEncoding"])
			assert.Equal(t, invokeCreateMetadata["contentLanguage"], properties["ContentLanguage"])
			assert.Equal(t, invokeCreateMetadata["custom"], item["Metadata"].(map[string]interface{})["custom"])
			break
		}
	}
	assert.True(t, found)

	out, invokeDeleteErr := deleteBlobRequest(ctx, client, "filename.txt", "")
	assert.NoError(t, invokeDeleteErr)
	assert.Empty(t, out.Data)

	// Confirm the deletion: getting the removed blob must fail with BlobNotFound.
	_, invokeSecondGetErr := getBlobRequest(ctx, client, "filename.txt", false)
	assert.Error(t, invokeSecondGetErr)
	assert.Contains(t, invokeSecondGetErr.Error(), "ServiceCode=BlobNotFound")

	return nil
}
|
||||
|
||||
// testListContents invokes the list-contents operation once with no filters
// and asserts that it succeeds.
testListContents := func(ctx flow.Context) error {
	sdkClient, err := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
	if err != nil {
		panic(err)
	}
	defer sdkClient.Close()

	// No prefix, no marker, unlimited results, no optional include flags.
	_, listErr := listBlobRequest(ctx, sdkClient, "", "", -1, false, false, false, false, false)
	assert.NoError(t, listErr)
	return listErr
}
|
||||
|
||||
// testListContentsWithOptions verifies the list operation with its options:
//   - prefix limits results to blobs whose names start with the prefix
//   - marker resumes a paged listing where the previous page stopped
//   - maxResults caps the number of blobs returned per page
//   - includeDeleted surfaces soft-deleted blobs (this only works when a
//     soft-delete policy is enabled on the storage account's blob service)
testListContentsWithOptions := func(ctx flow.Context) error {
	client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
	if clientErr != nil {
		panic(clientErr)
	}
	defer client.Close()

	// Two blobs share the "prefixA" prefix (one nested, one flat) and a third
	// uses "prefixB" so the prefix filter has something to exclude.
	blobNames := []string{"prefixA/filename.txt", "prefixAfilename.txt", "prefixB/filename.txt"}
	for _, blobName := range blobNames {
		invokeCreateRequest := &daprsdk.InvokeBindingRequest{
			Name:      "azure-blobstorage-output",
			Operation: "create",
			Data:      []byte("some example content"),
			Metadata:  map[string]string{"blobName": blobName},
		}
		_, invokeCreateErr := client.InvokeBinding(ctx, invokeCreateRequest)
		assert.NoError(t, invokeCreateErr)
	}

	// Page 1: prefix filter plus maxResults=1 returns exactly the first
	// matching blob (blobs list in lexicographic order).
	out, listErr := listBlobRequest(ctx, client, "prefixA", "", 1, false, false, false, false, false)
	assert.NoError(t, listErr)

	var output []map[string]interface{}
	unmarshalErr := json.Unmarshal(out.Data, &output)
	assert.NoError(t, unmarshalErr)

	assert.Equal(t, 1, len(output))
	assert.Equal(t, "prefixA/filename.txt", output[0]["Name"])

	// The binding returns the continuation marker in the response metadata.
	nextMarker := out.Metadata["marker"]

	// Page 2: resuming from the marker yields the next (and last) "prefixA" blob.
	out2, listErr2 := listBlobRequest(ctx, client, "prefixA", nextMarker, 1, false, false, false, false, false)
	assert.NoError(t, listErr2)

	var output2 []map[string]interface{}
	err2 := json.Unmarshal(out2.Data, &output2)
	assert.NoError(t, err2)

	assert.Equal(t, 1, len(output2))
	assert.Equal(t, "prefixAfilename.txt", output2[0]["Name"])

	// Cleanup: delete everything created above.
	for _, blobName := range blobNames {
		_, invokeDeleteErr := deleteBlobRequest(ctx, client, blobName, "")
		assert.NoError(t, invokeDeleteErr)
	}

	// List the soft-deleted items under the prefix. This only returns the
	// deleted blobs if a soft-delete policy is enabled for the blob service.
	out3, listErr3 := listBlobRequest(ctx, client, "prefixA", "", -1, false, false, false, false, true)
	assert.NoError(t, listErr3)

	assert.Equal(t, "2", out3.Metadata["number"])
	var output3 []map[string]interface{}
	err3 := json.Unmarshal(out3.Data, &output3)
	assert.NoError(t, err3)
	assert.Equal(t, 2, len(output3))

	return nil
}
|
||||
|
||||
// testSnapshotDeleteAndList verifies that the list operation can enumerate
// blob snapshots and that the delete operation honors its deleteSnapshots
// option: "only" removes just the snapshots, "include" removes the base blob
// together with its snapshots. Snapshots are created directly through the
// Azure SDK because the binding does not expose snapshot creation.
testSnapshotDeleteAndList := func(ctx flow.Context) error {
	client, clientErr := daprsdk.NewClientWithPort(fmt.Sprint(currentGRPCPort))
	if clientErr != nil {
		panic(clientErr)
	}
	defer client.Close()

	// Talk to the same storage account the binding uses, via shared-key auth
	// taken from the environment.
	cred, _ := azblob.NewSharedKeyCredential(os.Getenv("AzureBlobStorageAccount"), os.Getenv("AzureBlobStorageAccessKey"))
	service, _ := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", os.Getenv("AzureBlobStorageAccount")), cred, nil)
	containerClient := service.NewContainerClient(os.Getenv("AzureBlobStorageContainer"))

	blobClient := containerClient.NewBlockBlobClient("snapshotthis.txt")
	uploadResp, uploadErr := blobClient.UploadBufferToBlockBlob(
		ctx, []byte("some example content"),
		azblob.HighLevelUploadToBlockBlobOption{}) // nolint: exhaustivestruct
	assert.NoError(t, uploadErr)
	uploadResp.Body.Close()
	_, createSnapshotErr := blobClient.CreateSnapshot(
		ctx, &azblob.CreateBlobSnapshotOptions{}) // nolint: exhaustivestruct
	assert.NoError(t, createSnapshotErr)

	// The base blob plus its one snapshot should both be listed.
	out, listErr := listBlobRequest(ctx, client, "snapshotthis.txt", "", -1, false, true, false, false, false)
	assert.NoError(t, listErr)
	assert.Equal(t, "2", out.Metadata["number"])

	// Delete only the snapshots, leaving the base blob in place.
	_, invokeDeleteErr := deleteBlobRequest(ctx, client, "snapshotthis.txt", "only")
	assert.NoError(t, invokeDeleteErr)

	// Verify the snapshot is gone: only the base blob remains.
	out2, listErr2 := listBlobRequest(ctx, client, "snapshotthis.txt", "", -1, false, true, false, false, false)
	assert.NoError(t, listErr2)
	assert.Equal(t, "1", out2.Metadata["number"])

	// Create another snapshot so the combined delete has something to remove.
	_, createSnapshotErr2 := blobClient.CreateSnapshot(
		ctx, &azblob.CreateBlobSnapshotOptions{}) // nolint: exhaustivestruct
	assert.NoError(t, createSnapshotErr2)

	// Delete the base blob and its snapshots all at once.
	_, invokeDeleteErr2 := deleteBlobRequest(ctx, client, "snapshotthis.txt", "include")
	assert.NoError(t, invokeDeleteErr2)

	// Verify that nothing is left.
	out3, listErr3 := listBlobRequest(ctx, client, "snapshotthis.txt", "", -1, false, true, false, false, false)
	assert.NoError(t, listErr3)
	assert.Equal(t, "0", out3.Metadata["number"])

	return nil
}
|
||||
|
||||
// Flow 1: authenticate to Blob Storage with an Azure AD service principal
// (tenant/client id/secret are resolved through the env-var secret store)
// and run the basic create/get/list/delete lifecycle.
flow.New(t, "blobstorage binding authentication using service principal").
	Step(sidecar.Run(sidecarName,
		embedded.WithoutApp(),
		embedded.WithComponentsPath("./components/serviceprincipal"),
		embedded.WithDaprGRPCPort(currentGRPCPort),
		embedded.WithDaprHTTPPort(currentHTTPPort),
		runtime.WithSecretStores(
			secretstores_loader.New("local.env", func() secretstores.SecretStore {
				return secretstore_env.NewEnvSecretStore(log)
			}),
		),
		runtime.WithOutputBindings(
			bindings_loader.NewOutput("azure.blobstorage", func() bindings.OutputBinding {
				return blobstorage.NewAzureBlobStorage(log)
			}),
		))).
	Step("Create blob", testCreateGetListDelete).
	Run()

// Each flow runs its own sidecar; grab fresh ports so the next sidecar does
// not collide with one that may still be shutting down.
ports, err = dapr_testing.GetFreePorts(2)
assert.NoError(t, err)

currentGRPCPort = ports[0]
currentHTTPPort = ports[1]

// Flow 2: the main test suite, authenticating with the storage-account
// access key.
flow.New(t, "blobstorage binding main test suite with access key authentication").
	Step(sidecar.Run(sidecarName,
		embedded.WithoutApp(),
		embedded.WithComponentsPath("./components/accesskey"),
		embedded.WithDaprGRPCPort(currentGRPCPort),
		embedded.WithDaprHTTPPort(currentHTTPPort),
		runtime.WithSecretStores(
			secretstores_loader.New("local.env", func() secretstores.SecretStore {
				return secretstore_env.NewEnvSecretStore(log)
			}),
		),
		runtime.WithOutputBindings(
			bindings_loader.NewOutput("azure.blobstorage", func() bindings.OutputBinding {
				return blobstorage.NewAzureBlobStorage(log)
			}),
		))).
	Step("Create blob", testCreateGetListDelete).
	Step("Create blob from file", testCreateBlobFromFile(false)).
	Step("List contents", testListContents).
	Step("Create blob with conflicting filename", testCreateBlobWithFileNameConflict).
	Step("List contents with various options", testListContentsWithOptions).
	Step("Creating a public blob does not work", testCreatePublicBlob(false, "")).
	Step("Create blob with invalid content hash", testCreateBlobInvalidContentHash).
	Step("Test snapshot deletion and listing", testSnapshotDeleteAndList).
	Run()

ports, err = dapr_testing.GetFreePorts(2)
assert.NoError(t, err)

currentGRPCPort = ports[0]
currentHTTPPort = ports[1]

// Flow 3: a component configured with decodeBase64=true must base64-decode
// binary payloads before writing them to the blob.
flow.New(t, "decode base64 option for binary blobs with access key authentication").
	Step(sidecar.Run(sidecarName,
		embedded.WithoutApp(),
		embedded.WithComponentsPath("./components/decodeBase64"),
		embedded.WithDaprGRPCPort(currentGRPCPort),
		embedded.WithDaprHTTPPort(currentHTTPPort),
		runtime.WithSecretStores(
			secretstores_loader.New("local.env", func() secretstores.SecretStore {
				return secretstore_env.NewEnvSecretStore(log)
			}),
		),
		runtime.WithOutputBindings(
			bindings_loader.NewOutput("azure.blobstorage", func() bindings.OutputBinding {
				return blobstorage.NewAzureBlobStorage(log)
			}),
		))).
	Step("Create blob from file", testCreateBlobFromFile(true)).
	Run()

ports, err = dapr_testing.GetFreePorts(2)
assert.NoError(t, err)

currentGRPCPort = ports[0]
currentHTTPPort = ports[1]

// Flow 4: container configured with blob-level public access; creating a
// publicly readable blob should succeed.
// (Flow name fixed: "Blog" -> "Blob", matching the container-level flow below.)
flow.New(t, "Blob Container Access Policy: Blob - with access key authentication").
	Step(sidecar.Run(sidecarName,
		embedded.WithoutApp(),
		embedded.WithComponentsPath("./components/publicAccessBlob"),
		embedded.WithDaprGRPCPort(currentGRPCPort),
		embedded.WithDaprHTTPPort(currentHTTPPort),
		runtime.WithSecretStores(
			secretstores_loader.New("local.env", func() secretstores.SecretStore {
				return secretstore_env.NewEnvSecretStore(log)
			}),
		),
		runtime.WithOutputBindings(
			bindings_loader.NewOutput("azure.blobstorage", func() bindings.OutputBinding {
				return blobstorage.NewAzureBlobStorage(log)
			}),
		))).
	Step("Creating a public blob works", testCreatePublicBlob(true, "publiccontainer")).
	Run()

ports, err = dapr_testing.GetFreePorts(2)
assert.NoError(t, err)

currentGRPCPort = ports[0]
currentHTTPPort = ports[1]

// Flow 5: container configured with container-level public access.
flow.New(t, "Blob Container Access Policy: Container - with access key authentication").
	Step(sidecar.Run(sidecarName,
		embedded.WithoutApp(),
		embedded.WithComponentsPath("./components/publicAccessContainer"),
		embedded.WithDaprGRPCPort(currentGRPCPort),
		embedded.WithDaprHTTPPort(currentHTTPPort),
		runtime.WithSecretStores(
			secretstores_loader.New("local.env", func() secretstores.SecretStore {
				return secretstore_env.NewEnvSecretStore(log)
			}),
		),
		runtime.WithOutputBindings(
			bindings_loader.NewOutput("azure.blobstorage", func() bindings.OutputBinding {
				return blobstorage.NewAzureBlobStorage(log)
			}),
		))).
	Step("Creating a public blob works", testCreatePublicBlob(true, "alsopubliccontainer")).
	Run()
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azure-blobstorage-output
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.azure.blobstorage
|
||||
version: v1
|
||||
metadata:
|
||||
- name: storageAccount
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccount
|
||||
key: AzureBlobStorageAccount
|
||||
- name: storageAccessKey
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccessKey
|
||||
key: AzureBlobStorageAccessKey
|
||||
- name: container
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageContainer
|
||||
key: AzureBlobStorageContainer
|
||||
- name: decodeBase64
|
||||
value: "false"
|
||||
- name: getBlobRetryCount
|
||||
value: 10
|
||||
- name: publicAccessLevel
|
||||
value: none
|
||||
|
||||
auth:
|
||||
secretStore: envvar-secret-store
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: envvar-secret-store
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azure-blobstorage-output
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.azure.blobstorage
|
||||
version: v1
|
||||
metadata:
|
||||
- name: storageAccount
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccount
|
||||
key: AzureBlobStorageAccount
|
||||
- name: storageAccessKey
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccessKey
|
||||
key: AzureBlobStorageAccessKey
|
||||
- name: container
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageContainer
|
||||
key: AzureBlobStorageContainer
|
||||
- name: decodeBase64
|
||||
value: "true"
|
||||
- name: getBlobRetryCount
|
||||
value: 10
|
||||
- name: publicAccessLevel
|
||||
value: none
|
||||
|
||||
auth:
|
||||
secretStore: envvar-secret-store
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: envvar-secret-store
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azure-blobstorage-output
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.azure.blobstorage
|
||||
version: v1
|
||||
metadata:
|
||||
- name: storageAccount
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccount
|
||||
key: AzureBlobStorageAccount
|
||||
- name: storageAccessKey
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccessKey
|
||||
key: AzureBlobStorageAccessKey
|
||||
- name: container
|
||||
value: publiccontainer
|
||||
- name: decodeBase64
|
||||
value: "false"
|
||||
- name: getBlobRetryCount
|
||||
value: 10
|
||||
- name: publicAccessLevel
|
||||
value: blob
|
||||
|
||||
auth:
|
||||
secretStore: envvar-secret-store
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: envvar-secret-store
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azure-blobstorage-output
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.azure.blobstorage
|
||||
version: v1
|
||||
metadata:
|
||||
- name: storageAccount
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccount
|
||||
key: AzureBlobStorageAccount
|
||||
- name: storageAccessKey
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccessKey
|
||||
key: AzureBlobStorageAccessKey
|
||||
- name: container
|
||||
value: alsopubliccontainer
|
||||
- name: decodeBase64
|
||||
value: "false"
|
||||
- name: getBlobRetryCount
|
||||
value: 10
|
||||
- name: publicAccessLevel
|
||||
value: container
|
||||
|
||||
auth:
|
||||
secretStore: envvar-secret-store
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: envvar-secret-store
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: azure-blobstorage-output
|
||||
namespace: default
|
||||
spec:
|
||||
type: bindings.azure.blobstorage
|
||||
version: v1
|
||||
metadata:
|
||||
- name: storageAccount
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageAccount
|
||||
key: AzureBlobStorageAccount
|
||||
- name: container
|
||||
secretKeyRef:
|
||||
name: AzureBlobStorageContainer
|
||||
key: AzureBlobStorageContainer
|
||||
- name: azureTenantId
|
||||
secretKeyRef:
|
||||
name: AzureCertificationTenantId
|
||||
key: AzureCertificationTenantId
|
||||
- name: azureClientId
|
||||
secretKeyRef:
|
||||
name: AzureCertificationServicePrincipalClientId
|
||||
key: AzureCertificationServicePrincipalClientId
|
||||
- name: azureClientSecret
|
||||
secretKeyRef:
|
||||
name: AzureCertificationServicePrincipalClientSecret
|
||||
key: AzureCertificationServicePrincipalClientSecret
|
||||
|
||||
auth:
|
||||
secretStore: envvar-secret-store
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Component
|
||||
metadata:
|
||||
name: envvar-secret-store
|
||||
namespace: default
|
||||
spec:
|
||||
type: secretstores.local.env
|
||||
version: v1
|
||||
metadata:
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
apiVersion: dapr.io/v1alpha1
|
||||
kind: Configuration
|
||||
metadata:
|
||||
name: azureblobstoragebindingconfig
|
||||
spec:
|
||||
features:
|
||||
|
|
@ -0,0 +1,124 @@
|
|||
module github.com/dapr/components-contrib/tests/certification/bindings/azure/blobstorage
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0
|
||||
github.com/dapr/components-contrib v1.5.1-rc.1
|
||||
github.com/dapr/components-contrib/tests/certification v0.0.0-20211130185200-4918900c09e1
|
||||
github.com/dapr/dapr v1.5.2-0.20220106203753-0e6bcbabc8ba
|
||||
github.com/dapr/go-sdk v1.3.0
|
||||
github.com/dapr/kit v0.0.2-0.20210614175626-b9074b64d233
|
||||
github.com/stretchr/testify v1.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
|
||||
contrib.go.opencensus.io/exporter/zipkin v0.1.1 // indirect
|
||||
github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a // indirect
|
||||
github.com/Azure/azure-amqp-common-go/v3 v3.1.0 // indirect
|
||||
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.20.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1 // indirect
|
||||
github.com/Azure/azure-storage-blob-go v0.10.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.23 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.16 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/andybalholm/brotli v1.0.2 // indirect
|
||||
github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f // indirect
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/fasthttp/router v1.3.8 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-kit/log v0.1.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-logr/logr v0.3.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/cel-go v0.9.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/gnostic v0.5.1 // indirect
|
||||
github.com/grandcat/zeroconf v0.0.0-20190424104450-85eadb44205c // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 // indirect
|
||||
github.com/hashicorp/consul/api v1.3.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/serf v0.8.2 // indirect
|
||||
github.com/imdario/mergo v0.3.10 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/klauspost/compress v1.13.4 // indirect
|
||||
github.com/mattn/go-ieproxy v0.0.1 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/miekg/dns v1.1.35 // indirect
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/openzipkin/zipkin-go v0.2.2 // indirect
|
||||
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.11.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/prometheus/statsd_exporter v0.22.3 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20210217112953-d4a072536008 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasthttp v1.31.1-0.20211216042702-258a4c17b4f4 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.opentelemetry.io/otel v0.19.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
|
||||
google.golang.org/grpc v1.40.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/api v0.20.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.20.0 // indirect
|
||||
k8s.io/apimachinery v0.20.0 // indirect
|
||||
k8s.io/client-go v0.20.0 // indirect
|
||||
k8s.io/klog/v2 v2.4.0 // indirect
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.7.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/dapr/components-contrib/tests/certification => ../../../
|
||||
|
||||
replace github.com/dapr/components-contrib => ../../../../../
|
||||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue