Skip to content

Commit

Permalink
Merge pull request #199 from scality/feature/ZENKO-367/backbeat-end-to-end-tests-azure
Browse files Browse the repository at this point in the history

feature: ZENKO-367 CRR to Azure end-to-end tests
  • Loading branch information
ssalaues committed Jul 20, 2018
2 parents 475d8f8 + bdd3f75 commit 82be3e8
Show file tree
Hide file tree
Showing 11 changed files with 490 additions and 38 deletions.
3 changes: 3 additions & 0 deletions tests/.env
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,13 @@ export AWS_S3_BACKBEAT_BUCKET_NAME=ci-zenko-aws-crr-target-bucket
export AWS_S3_BACKBEAT_SRC_BUCKET_NAME=ci-zenko-aws-crr-src-bucket
export AWS_S3_BUCKET_NAME=ci-zenko-aws-target-bucket
export AWS_S3_BUCKET_NAME_2=ci-zenko-aws-target-bucket-2
export AWS_S3_BACKEND_SOURCE_LOCATION=ci-zenko-aws-source-location
export AWS_S3_BACKEND_DESTINATION_LOCATION=ci-zenko-aws-destination-location
export AZURE_BACKBEAT_CONTAINER_NAME=ci-zenko-azure-crr-target-bucket
export AZURE_BACKBEAT_SRC_CONTAINER_NAME=ci-zenko-azure-crr-src-bucket
export AZURE_BACKEND_CONTAINER_NAME=ci-zenko-azure-target-bucket
export AZURE_BACKEND_CONTAINER_NAME_2=ci-zenko-azure-target-bucket-2
export AZURE_BACKEND_DESTINATION_LOCATION=ci-zenko-azure-destination-location
export GCP_BUCKET_NAME=ci-zenko-gcp-target-bucket
export GCP_BUCKET_NAME_2=ci-zenko-gcp-target-bucket-2
export GCP_CRR_BUCKET_NAME=ci-zenko-gcp-crr-target-bucket
Expand Down
2 changes: 2 additions & 0 deletions tests/.secrets.env.example
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
export ZENKO_STORAGE_ACCOUNT_ACCESS_KEY=
export ZENKO_STORAGE_ACCOUNT_SECRET_KEY=
export AWS_ACCESS_KEY_ID=
export AWS_GCP_BACKEND_ACCESS_KEY=
export AWS_GCP_BACKEND_ACCESS_KEY_2=
Expand Down
158 changes: 152 additions & 6 deletions tests/zenko_e2e/backbeat/ReplicationUtility.js
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,9 @@ const fs = require('fs');

const { scalityS3Client, awsS3Client } = require('./s3SDK');

const srcLocation = process.env.AWS_SOURCE_LOCATION;
const destLocation = process.env.AWS_DESTINATION_LOCATION;
const srcLocation = process.env.AWS_S3_BACKEND_SOURCE_LOCATION;
const destAWSLocation = process.env.AWS_S3_BACKEND_DESTINATION_LOCATION;
const destAzureLocation = process.env.AZURE_BACKEND_DESTINATION_LOCATION;
const REPLICATION_TIMEOUT = 10000;

class ReplicationUtility {
Expand Down Expand Up @@ -37,6 +38,11 @@ class ReplicationUtility {
versionInfo.VersionId, next), cb);
}

_deleteBlobList(blobList, containerName, cb) {
async.each(blobList, (blob, next) =>
this.deleteBlob(containerName, blob.name, undefined, next), cb);
}

_setS3Client(s3Client) {
this.s3 = s3Client;
return this;
Expand Down Expand Up @@ -65,6 +71,20 @@ class ReplicationUtility {
});
}

deleteAllBlobs(containerName, keyPrefix, cb) {
const options = { include: 'metadata' };
this.azure.listBlobsSegmented(containerName, null, options,
(err, result, response) => {
if (err) {
return cb(err);
}
// Only delete the blobs put by the current test.
const filteredEntries = result.entries.filter(entry =>
entry.name.startsWith(keyPrefix));
return this._deleteBlobList(filteredEntries, containerName, cb);
});
}

putObject(bucketName, objectName, content, cb) {
this.s3.putObject({
Bucket: bucketName,
Expand Down Expand Up @@ -315,6 +335,24 @@ class ReplicationUtility {
}, cb);
}

/**
 * Fetch a blob's content as text via the Azure SDK.
 * @param {String} containerName - Azure container to read from.
 * @param {String} blob - Name of the blob to fetch.
 * @param {Function} cb - Azure SDK callback (err, text, ...).
 */
getBlobToText(containerName, blob, cb) {
    this.azure.getBlobToText(containerName, blob, cb);
}

getBlob(containerName, blob, cb) {
const request = this.azure.createReadStream(containerName, blob);
const data = [];
let totalLength = 0;
request.on('data', chunk => {
totalLength += chunk.length;
data.push(chunk);
});
request.on('end', () => {
cb(null, Buffer.concat(data, totalLength))
});
request.on('error', err => cb(err));
}

createBucket(bucketName, cb) {
this.s3.createBucket({ Bucket: bucketName }, cb);
}
Expand Down Expand Up @@ -437,6 +475,10 @@ class ReplicationUtility {
}, cb);
}

/**
 * Delete a single blob from an Azure container.
 * @param {String} containerName - Azure container holding the blob.
 * @param {String} blob - Name of the blob to delete.
 * @param {Object|undefined} options - Azure SDK delete options (may be
 *   undefined).
 * @param {Function} cb - Azure SDK callback (err, ...).
 */
deleteBlob(containerName, blob, options, cb) {
    this.azure.deleteBlob(containerName, blob, options, cb);
}

// Continue getting head object while the status is PENDING or PROCESSING.
waitUntilReplicated(bucketName, key, versionId, cb) {
let status;
Expand All @@ -461,8 +503,8 @@ class ReplicationUtility {
// Continue getting object while the object exists.
waitUntilDeleted(bucketName, key, client, cb) {
let objectExists;
const method = 'getObject';
const expectedCode = 'NoSuchKey';
const method = client === 'azure' ? 'getBlobToText' : 'getObject';
const expectedCode = client === 'azure' ? 'BlobNotFound' : 'NoSuchKey';
return async.doWhilst(callback =>
this[method](bucketName, key, err => {
if (err && err.code !== expectedCode) {
Expand Down Expand Up @@ -495,9 +537,9 @@ class ReplicationUtility {
destData.ContentLength);
this._compareObjectBody(srcData.Body, destData.Body);
const srcUserMD = srcData.Metadata;
assert.strictEqual(srcUserMD[`${destLocation}-version-id`],
assert.strictEqual(srcUserMD[`${destAWSLocation}-version-id`],
destData.VersionId);
assert.strictEqual(srcUserMD[`${destLocation}-replication-status`],
assert.strictEqual(srcUserMD[`${destAWSLocation}-replication-status`],
'COMPLETED');
const destUserMD = destData.Metadata;
assert.strictEqual(destUserMD['scal-version-id'],
Expand All @@ -516,6 +558,81 @@ class ReplicationUtility {
});
}

/**
 * Assert that an object replicated from a Zenko source bucket to an Azure
 * container matches the source: replication status metadata on both sides,
 * version-id metadata on the destination, and identical body content.
 * The destination blob key is namespaced as `<srcBucket>/<key>`.
 * @param {String} srcBucket - Zenko source bucket name.
 * @param {String} containerName - Azure destination container name.
 * @param {String} key - Object key in the source bucket.
 * @param {Function} cb - Called with the first error, or no args on success.
 */
compareObjectsAzure(srcBucket, containerName, key, cb) {
    return async.series([
        next => this.waitUntilReplicated(srcBucket, key, undefined, next),
        next => this.getObject(srcBucket, key, next),
        next => this.azure.getBlobProperties(containerName,
            `${srcBucket}/${key}`, next),
        next => this.getBlob(containerName,
            `${srcBucket}/${key}`, next),
    ], (err, data) => {
        if (err) {
            return cb(err);
        }
        // data[0] is waitUntilReplicated's result (unused). The azure SDK
        // invokes its callback with (err, result, response), which async
        // collects into an array: [result, response].
        const srcData = data[1];
        const destProperties = data[2];
        const destPropResult = destProperties[0];
        const destPropResponse = destProperties[1];
        const destDataBuf = data[3];
        assert.strictEqual(srcData.ReplicationStatus, 'COMPLETED');
        // Azure does not have versioning so there is no version metadata
        // from Azure to set on the source.
        assert.strictEqual(
            srcData.Metadata[`${destAzureLocation}-replication-status`],
            'COMPLETED');
        // Check the replica markers both in the parsed metadata object and
        // in the raw x-ms-meta-* response headers.
        assert.strictEqual(
            destPropResult.metadata['scal_replication_status'], 'REPLICA');
        assert.strictEqual(
            destPropResult.metadata['scal_version_id'], srcData.VersionId);
        assert.strictEqual(
            destPropResponse.headers['x-ms-meta-scal_replication_status'],
            'REPLICA');
        assert.strictEqual(
            destPropResponse.headers['x-ms-meta-scal_version_id'],
            srcData.VersionId);
        this._compareObjectBody(srcData.Body, destDataBuf);
        return cb();
    });
}

compareAzureObjectProperties(srcBucket, containerName, key, cb) {
return async.series([
next => this.waitUntilReplicated(srcBucket, key, undefined, next),
next => this.getHeadObject(srcBucket, key, next),
next => this.azure.getBlobProperties(containerName,
`${srcBucket}/${key}`, next),
], (err, data) => {
if (err) {
return cb(err);
}
const srcData = data[1];
const destData = data[2];
const destResult = destData[0];
const destResponse = destData[1];
const { contentSettings } = destResult;
const { headers } = destResponse;
let expectedVal = srcData.Metadata.customkey;
assert.strictEqual(expectedVal,
destResult.metadata['customkey']);
assert.strictEqual(expectedVal,
headers['x-ms-meta-customkey']);
expectedVal = srcData.ContentType;
assert.strictEqual(expectedVal, contentSettings.contentType);
assert.strictEqual(expectedVal, headers['content-type']);
expectedVal = srcData.CacheControl;
assert.strictEqual(expectedVal, contentSettings.cacheControl);
assert.strictEqual(expectedVal, headers['cache-control']);
expectedVal = srcData.ContentEncoding;
assert.strictEqual(expectedVal, contentSettings.contentEncoding);
assert.strictEqual(expectedVal, headers['content-encoding']);
expectedVal = srcData.ContentLanguage;
assert.strictEqual(expectedVal, contentSettings.contentLanguage);
assert.strictEqual(expectedVal, headers['content-language']);
return cb();
});
};

compareACLsAWS(srcBucket, destBucket, key, cb) {
return async.series([
next => this.waitUntilReplicated(srcBucket, key, undefined, next),
Expand Down Expand Up @@ -555,6 +672,35 @@ class ReplicationUtility {
});
}

compareObjectTagsAzure(srcBucket, destContainer, key, scalityVersionId,
cb) {
return async.series([
next => this.waitUntilReplicated(srcBucket, key, scalityVersionId,
next),
next => this.getObjectTagging(srcBucket, key, scalityVersionId,
next),
next => this.azure.getBlobMetadata(destContainer,
`${srcBucket}/${key}`, next),
], (err, data) => {
if (err) {
return cb(err);
}
const srcData = data[1];
const destData = data[2];
const destTagSet = [];
const destTags = destData[0].metadata.tags;
if (destTags) {
const parsedTags = JSON.parse(destTags);
Object.keys(parsedTags).forEach(key => destTagSet.push({
Key: key,
Value: parsedTags[key],
}));
}
assert.deepStrictEqual(srcData.TagSet, destTagSet);
return cb();
});
}

assertNoObject(bucketName, key, cb) {
this.getObject(bucketName, key, err => {
assert.strictEqual(err.code, 'NoSuchKey');
Expand Down
77 changes: 51 additions & 26 deletions tests/zenko_e2e/backbeat/Using.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,47 +10,72 @@
```

2. Create an account using Orbit.
3. Export the access key and secret key of that account:
3. Export the access key and secret key of that account (for example, in
`.secrets.env`):

```
export ZENKO_BACKBEAT_ACCESS_KEY=<access-key>
export ZENKO_BACKBEAT_SECRET_KEY=<secret-key>
export ZENKO_STORAGE_ACCOUNT_ACCESS_KEY=<zenko-access-key>
export ZENKO_STORAGE_ACCOUNT_SECRET_KEY=<zenko-secret-key>
```

3. Install node and npm.
4. Navigate to `Zenko/tests/zenko_e2e/backbeat`.
5. Install node modules: `npm i`.
4. Install node and npm.
5. Navigate to `Zenko/tests/zenko_e2e/backbeat`.
6. Install node modules: `npm i`.

### Tests for CRR to AWS:

1. Create a bucket on AWS `<destination-bucket-name>` with versioning enabled.
2. In Orbit, create an AWS location `<destination-location-name>` with an AWS
`<destination-bucket-name>`.
3. In Orbit, create an AWS location `<source-location-name>`.
4. Export the access key, secret key, AWS bucket name, and AWS location:
1. Create a bucket on AWS `<destination-aws-bucket-name>` with versioning
enabled.
2. In Orbit, create an AWS storage location `<destination-aws-location-name>`
with an AWS bucket `<destination-aws-bucket-name>`.
3. In Orbit, create an AWS location `<source-aws-location-name>`.
4. Create a container on Azure `<destination-azure-container-name>`.
5. In Orbit, create an Azure storage location
`<destination-azure-location-name>` with an Azure container `<destination-azure-container-name>`.
6. Export the keys, bucket name, container name, and storage location names
(for example, in `.env` and `.secrets.env`):

```
export AWS_S3_BACKBEAT_ACCESS_KEY=<access-key>
export AWS_S3_BACKBEAT_SECRET_KEY=<secret-key>
export AWS_S3_BACKBEAT_BUCKET_NAME=<destination-bucket-name>
export AWS_DESTINATION_LOCATION=<destination-location-name>
export AWS_SOURCE_LOCATION=<source-location-name>
export AWS_S3_BACKEND_ACCESS_KEY=<aws-access-key>
export AWS_S3_BACKEND_SECRET_KEY=<aws-secret-key>
export AWS_S3_BACKBEAT_BUCKET_NAME=<destination-aws-bucket-name>
export AWS_S3_BACKEND_DESTINATION_LOCATION=<destination-aws-location-name>
export AWS_S3_BACKEND_SOURCE_LOCATION=<source-aws-location-name>
export AZURE_BACKEND_ACCOUNT_NAME=<azure-account-name>
export AZURE_BACKEND_ACCESS_KEY=<azure-access-key>
export AZURE_BACKEND_ENDPOINT=<azure-endpoint>
export AZURE_BACKBEAT_CONTAINER_NAME=<destination-azure-container-name>
export AZURE_BACKEND_DESTINATION_LOCATION=<destination-azure-location-name>
```

5. Run the test suite: `npm run test_crr`.
7. If using `*.env` files, source the files:

```
source .env && source .secrets.env
```

8. Run the test suite: `npm run test_crr`.

### Tests for Backbeat API:

1. Create a bucket on AWS `<destination-bucket-name>` with versioning enabled.
2. In Orbit, create an AWS location `<destination-location-name>` with an AWS
`<destination-bucket-name>`.
3. Export the access key, secret key, AWS bucket name, and AWS location:
1. Create a bucket on AWS `<destination-aws-bucket-name>` with versioning
enabled.
2. In Orbit, create an AWS location `<destination-aws-location-name>` with an
AWS bucket `<destination-aws-bucket-name>`.
3. Export the keys, AWS bucket name, and AWS location (for example, in `.env`
and `.secrets.env`):

```
export AWS_S3_BACKEND_ACCESS_KEY=<aws-access-key>
export AWS_S3_BACKEND_SECRET_KEY=<aws-secret-key>
export AWS_S3_BACKBEAT_BUCKET_NAME=<destination-aws-bucket-name>
export AWS_S3_BACKEND_DESTINATION_LOCATION=<destination-aws-location-name>
```

4. If using `*.env` files, source the files:

```
export AWS_S3_BACKBEAT_ACCESS_KEY=<access-key>
export AWS_S3_BACKBEAT_SECRET_KEY=<secret-key>
export AWS_S3_BACKBEAT_BUCKET_NAME=<destination-bucket-name>
export AWS_DESTINATION_LOCATION=<destination-location-name>
source .env && source .secrets.env
```

4. Run the test suite: `npm run test_api`.
5. Run the test suite: `npm run test_api`.
10 changes: 10 additions & 0 deletions tests/zenko_e2e/backbeat/azureSDK.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
const azure = require('azure-storage');

// The accompanying docs (Using.md) instruct exporting AZURE_BACKEND_*
// credentials, while this module originally read only AZURE_BACKBEAT_*,
// leaving the service unconfigured. Accept either spelling, preferring
// the AZURE_BACKBEAT_* names for backward compatibility.
const storageAccount = process.env.AZURE_BACKBEAT_ACCOUNT_NAME ||
    process.env.AZURE_BACKEND_ACCOUNT_NAME;
const storageAccessKey = process.env.AZURE_BACKBEAT_ACCESS_KEY ||
    process.env.AZURE_BACKEND_ACCESS_KEY;
const storageEndpoint = process.env.AZURE_BACKBEAT_ENDPOINT ||
    process.env.AZURE_BACKEND_ENDPOINT;

// Shared BlobService instance used by the backbeat end-to-end tests.
const sharedBlobSvc =
    azure.createBlobService(storageAccount, storageAccessKey, storageEndpoint);

module.exports = sharedBlobSvc;
Loading

0 comments on commit 82be3e8

Please sign in to comment.