diff --git a/collectors/aws/ec2/describeSnapshots.js b/collectors/aws/ec2/describeSnapshots.js index 0cdf26ef13..854472d3ac 100644 --- a/collectors/aws/ec2/describeSnapshots.js +++ b/collectors/aws/ec2/describeSnapshots.js @@ -8,6 +8,7 @@ var helpers = require(__dirname + '/../../../helpers/aws'); module.exports = function(AWSConfig, collection, retries, callback) { var ec2 = new AWS.EC2(AWSConfig); var sts = new AWS.STS(AWSConfig); + var paginating = false; helpers.makeCustomCollectorCall(sts, 'getCallerIdentity', {}, retries, null, null, null, function(stsErr, stsData) { if (stsErr || !stsData.Account) { @@ -16,6 +17,7 @@ module.exports = function(AWSConfig, collection, retries, callback) { } var params = { + MaxResults: 1000, Filters: [ { Name: 'owner-id', @@ -32,13 +34,36 @@ module.exports = function(AWSConfig, collection, retries, callback) { ] }; - helpers.makeCustomCollectorCall(ec2, 'describeSnapshots', params, retries, null, null, null, function(err, data) { + var paginateCb = function(err, data) { if (err) { collection.ec2.describeSnapshots[AWSConfig.region].err = err; - } else { - collection.ec2.describeSnapshots[AWSConfig.region].data = data.Snapshots; + } else if (data) { + if (paginating && data.Snapshots && data.Snapshots.length && + collection.ec2.describeSnapshots[AWSConfig.region].data && + collection.ec2.describeSnapshots[AWSConfig.region].data.length) { + collection.ec2.describeSnapshots[AWSConfig.region].data = collection.ec2.describeSnapshots[AWSConfig.region].data.concat(data.Snapshots); + } else if (!paginating) { + collection.ec2.describeSnapshots[AWSConfig.region].data = data.Snapshots; + } + if (data.NextToken && + collection.ec2.describeSnapshots[AWSConfig.region].data && + collection.ec2.describeSnapshots[AWSConfig.region].data.length) { + paginating = true; + return execute(data.NextToken); + } } + callback(); - }); + }; + function execute(nextToken) { // eslint-disable-line no-inner-declarations + var localParams = 
JSON.parse(JSON.stringify(params || {})); + if (nextToken) localParams['NextToken'] = nextToken; + if (nextToken) { + helpers.makeCustomCollectorCall(ec2, 'describeSnapshots', localParams, retries, null, null, null, paginateCb); + } else { + helpers.makeCustomCollectorCall(ec2, 'describeSnapshots', params, retries, null, null, null, paginateCb); + } + } + execute(); }); -}; \ No newline at end of file +}; diff --git a/exports.js b/exports.js index 0e2a00a76e..9ca097916d 100644 --- a/exports.js +++ b/exports.js @@ -53,6 +53,7 @@ module.exports = { 'workgroupEnforceConfiguration' : require(__dirname + '/plugins/aws/athena/workgroupEnforceConfiguration.js'), 'customModelInVpc' :require(__dirname + '/plugins/aws/bedrock/customModelInVpc.js'), + 'bedrockInUse' :require(__dirname + '/plugins/aws/bedrock/bedrockInUse.js'), 'privateCustomModel' :require(__dirname + '/plugins/aws/bedrock/privateCustomModel.js'), 'customModelHasTags' :require(__dirname + '/plugins/aws/bedrock/customModelHasTags.js'), 'modelInvocationLoggingEnabled' :require(__dirname + '/plugins/aws/bedrock/modelInvocationLoggingEnabled.js'), @@ -725,7 +726,7 @@ module.exports = { 'workspacePublicAccessDisabled' : require(__dirname + '/plugins/azure/machinelearning/workspacePublicAccessDisabled.js'), 'workspaceLoggingEnabled' : require(__dirname + '/plugins/azure/machinelearning/workspaceLoggingEnabled.js'), - 'mlWorkspaceHasTags' : require(__dirname + '/plugins/azure/machinelearning/mlWorkspaceHasTags.js'), + 'mlWorkspaceHasTags' : require(__dirname + '/plugins/azure/machinelearning/mlWorkspaceHasTags.js'), 'minimumTlsVersion' : require(__dirname + '/plugins/azure/redisCache/minimumTlsVersion.js'), diff --git a/helpers/aws/api.js b/helpers/aws/api.js index 116cb46c18..b28dff1cbd 100644 --- a/helpers/aws/api.js +++ b/helpers/aws/api.js @@ -1449,6 +1449,10 @@ var calls = { describeDBClusters: { property: 'DBClusters', paginate: 'Marker' + }, + describeDBInstances: { + property: 'DBInstances', + paginate: 
'Marker' } }, Organizations: { diff --git a/helpers/aws/api_multipart.js b/helpers/aws/api_multipart.js index 435ad2b25b..9389518a67 100644 --- a/helpers/aws/api_multipart.js +++ b/helpers/aws/api_multipart.js @@ -838,6 +838,10 @@ var calls = [ describeDBClusters: { property: 'DBClusters', paginate: 'Marker' + }, + describeDBInstances: { + property: 'DBInstances', + paginate: 'Marker' } }, Organizations: { diff --git a/plugins/aws/bedrock/bedrockInUse.js b/plugins/aws/bedrock/bedrockInUse.js new file mode 100644 index 0000000000..328a121260 --- /dev/null +++ b/plugins/aws/bedrock/bedrockInUse.js @@ -0,0 +1,52 @@ +var async = require('async'); +var helpers = require('../../../helpers/aws'); + +module.exports = { + title: 'AWS Bedrock In Use', + category: 'AI & ML', + domain: 'Machine Learning', + severity: 'Low', + description: 'Ensures that AWS Bedrock service is in use within your AWS account.', + more_info: 'AWS Bedrock provides access to high-performing foundation models from leading AI startups and Amazon through a unified API, enabling easy experimentation, customization, and deployment of generative AI applications with robust security and privacy features.', + link: 'https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html', + recommended_action: 'Use Bedrock service to utilize top foundation models with strong security and customization.', + apis: ['Bedrock:listCustomModels'], + realtime_triggers: ['bedrock:DeleteCustomModel'], + + run: function(cache, settings, callback) { + var results = []; + var source = {}; + var regions = helpers.regions(settings); + + async.each(regions.bedrock, function(region, rcb){ + var listCustomModels = helpers.addSource(cache, source, + ['bedrock', 'listCustomModels', region]); + + if (!listCustomModels) return rcb(); + + if (listCustomModels.err && listCustomModels.err.message.includes('Unknown operation')) { + helpers.addResult(results, 0, + 'Custom model service is not available in this region', region); + 
return rcb(); + } + + if (listCustomModels.err || !listCustomModels.data) { + helpers.addResult(results, 3, + `Unable to query for Bedrock custom model list: ${helpers.addError(listCustomModels)}`, region); + return rcb(); + } + + if (!listCustomModels.data.length) { + helpers.addResult(results, 2, 'Bedrock service is not in use', region); + return rcb(); + } else { + helpers.addResult(results, 0, 'Bedrock service is in use', region); + return rcb(); + + } + + }, function(){ + callback(null, results, source); + }); + } +}; diff --git a/plugins/aws/bedrock/bedrockInUse.spec.js b/plugins/aws/bedrock/bedrockInUse.spec.js new file mode 100644 index 0000000000..d47f6e41af --- /dev/null +++ b/plugins/aws/bedrock/bedrockInUse.spec.js @@ -0,0 +1,73 @@ +var expect = require('chai').expect; +const bedrockInUse = require('./bedrockInUse'); + +const listCustomModels = [ + { + "modelArn": "arn:aws:bedrock:us-east-1:11223344:custom-model/amazon.titan-text-lite-v1:0:4k/2ytyyx8nid0h", + "modelName": "model2", + "creationTime": "2023-11-29T10:45:43.056000+00:00", + "baseModelArn": "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-text-lite-v1:0:4k", + "baseModelName": "" + }, + { + "modelArn": "arn:aws:bedrock:us-east-1:11223344:custom-model/amazon.titan-text-lite-v1:0:4k/vjqsydtdhkpz", + "modelName": "testmodel2", + "creationTime": "2023-11-28T11:29:18.655000+00:00", + "baseModelArn": "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-text-lite-v1:0:4k", + "baseModelName": "" + } +]; + + +const createCache = (listModels) => { + return { + bedrock: { + listCustomModels: { + 'us-east-1': { + err: null, + data: listModels + } + }, + } + }; +}; + + +describe('bedrockInUse', function () { + describe('run', function () { + it('should PASS if Bedrock service is in use', function (done) { + const cache = createCache([listCustomModels[0]]); + bedrockInUse.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(0); + 
expect(results[0].region).to.equal('us-east-1'); + expect(results[0].message).to.include('Bedrock service is in use') + done(); + }); + }); + + it('should FAIL if Bedrock service is not in use', function (done) { + const cache = createCache([]); + bedrockInUse.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(2); + expect(results[0].region).to.equal('us-east-1'); + expect(results[0].message).to.include('Bedrock service is not in use') + done(); + }); + }); + + + it('should UNKNOWN if unable to query Bedrock custom model', function (done) { + const cache = createCache(null, null); + bedrockInUse.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(3); + expect(results[0].region).to.equal('us-east-1'); + expect(results[0].message).to.include('Unable to query for Bedrock custom model list') + done(); + }); + }); + + }); +}); diff --git a/plugins/aws/ec2/appTierInstanceIamRole.js b/plugins/aws/ec2/appTierInstanceIamRole.js index eb39ea7b3e..2088e85081 100644 --- a/plugins/aws/ec2/appTierInstanceIamRole.js +++ b/plugins/aws/ec2/appTierInstanceIamRole.js @@ -11,7 +11,7 @@ module.exports = { more_info: 'EC2 instances should have IAM roles configured with necessary permission to access other AWS services', link: 'https://aws.amazon.com/blogs/security/new-attach-an-aws-iam-role-to-an-existing-amazon-ec2-instance-by-using-the-aws-cli/', recommended_action: 'Modify EC2 instances to attach IAM roles with required IAM policies', - apis: ['EC2:describeInstances', 'EC2:describeTags', 'IAM:listRoles', 'IAM:listRolePolicies', 'IAM:listAttachedRolePolicies'], + apis: ['EC2:describeInstances', 'IAM:listRoles', 'IAM:listRolePolicies', 'IAM:listAttachedRolePolicies'], settings: { ec2_app_tier_tag_key: { name: 'EC2 App-Tier Tag Key', @@ -71,14 +71,8 @@ module.exports = { var resource = `arn:${awsOrGov}:ec2:${region}:${accountId}:instance/${entry.InstanceId}`; var tagFound 
= false; - for (let t in describeTags.data) { - let tag = describeTags.data[t]; - - if (tag.ResourceId && tag.ResourceId === entry.InstanceId && - tag.Key && tag.Key === config.ec2_app_tier_tag_key) { - tagFound = true; - break; - } + if (entry.Tags && entry.Tags.length) { + tagFound = entry.Tags.find(tag => tag.Key === config.ec2_app_tier_tag_key); } if (!tagFound) { @@ -93,7 +87,7 @@ module.exports = { } else { var roleNameArr = entry.IamInstanceProfile.Arn.split('/'); var roleName = roleNameArr[roleNameArr.length-1]; - + // Get managed policies attached to role var listAttachedRolePolicies = helpers.addSource(cache, source, ['iam', 'listAttachedRolePolicies', region, roleName]); @@ -136,10 +130,10 @@ module.exports = { } } } - + cb(); }); - + return rcb(); }, function(){ callback(null, results, source); diff --git a/plugins/aws/ec2/webTierInstanceIamRole.js b/plugins/aws/ec2/webTierInstanceIamRole.js index 52b4d7df89..42b03a5e0b 100644 --- a/plugins/aws/ec2/webTierInstanceIamRole.js +++ b/plugins/aws/ec2/webTierInstanceIamRole.js @@ -11,7 +11,7 @@ module.exports = { more_info: 'EC2 instances should have IAM roles configured with necessary permission to access other AWS services', link: 'https://aws.amazon.com/blogs/security/new-attach-an-aws-iam-role-to-an-existing-amazon-ec2-instance-by-using-the-aws-cli/', recommended_action: 'Modify EC2 instances to attach IAM roles with required IAM policies', - apis: ['EC2:describeInstances', 'EC2:describeTags', 'IAM:listRoles', 'IAM:listRolePolicies', 'IAM:listAttachedRolePolicies'], + apis: ['EC2:describeInstances', 'IAM:listRoles', 'IAM:listRolePolicies', 'IAM:listAttachedRolePolicies'], settings: { ec2_web_tier_tag_key: { name: 'EC2 Web-Tier Tag Key', @@ -71,14 +71,8 @@ module.exports = { var resource = `arn:${awsOrGov}:ec2:${region}:${accountId}:instance/${entry.InstanceId}`; var tagFound = false; - for (let t in describeTags.data) { - let tag = describeTags.data[t]; - - if (tag.ResourceId && tag.ResourceId === 
entry.InstanceId && - tag.Key && tag.Key === config.ec2_web_tier_tag_key) { - tagFound = true; - break; - } + if (entry.Tags && entry.Tags.length) { + tagFound = entry.Tags.find(tag => tag.Key === config.ec2_web_tier_tag_key); } if (!tagFound) { @@ -93,7 +87,7 @@ } else { var roleNameArr = entry.IamInstanceProfile.Arn.split('/'); var roleName = roleNameArr[roleNameArr.length-1]; - + // Get managed policies attached to role var listAttachedRolePolicies = helpers.addSource(cache, source, ['iam', 'listAttachedRolePolicies', region, roleName]); @@ -136,10 +130,10 @@ } } } - + cb(); }); - + return rcb(); }, function(){ callback(null, results, source); diff --git a/plugins/aws/elbv2/elbv2TLSVersionCipherEnabled.js b/plugins/aws/elbv2/elbv2TLSVersionCipherEnabled.js index 24c472da85..47f1e26756 100644 --- a/plugins/aws/elbv2/elbv2TLSVersionCipherEnabled.js +++ b/plugins/aws/elbv2/elbv2TLSVersionCipherEnabled.js @@ -5,9 +5,9 @@ module.exports = { title: 'ELBv2 TLS Version and Cipher Header Disabled', category: 'ELBv2', domain: 'Content Delivery', - severity: 'Medium', + severity: 'Low', description: 'Ensures that AWS ELBv2 load balancers does not have TLS version and cipher headers enabled.', - more_info: 'Disabling TLS version and cipher headers mitigates potential information leakage risks and aligns with security best practices, ensuring that sensitive details are not exposed to unauthorized parties.', + more_info: 'TLS Version and Cipher Header provides information about the specific TLS version and cipher suite used during the establishment of the secure connection. 
Enabling the header might leak the sensitive information about the encryption protocols and algorithms being used', link: 'https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html', recommended_action: 'Update ELBv2 load balancer traffic configuration to disable TLS version and cipher headers', apis: ['ELBv2:describeLoadBalancers', 'ELBv2:describeLoadBalancerAttributes'], diff --git a/plugins/aws/kms/kmsKeyRotation.js b/plugins/aws/kms/kmsKeyRotation.js index e788725b7f..55b744d3a9 100644 --- a/plugins/aws/kms/kmsKeyRotation.js +++ b/plugins/aws/kms/kmsKeyRotation.js @@ -111,6 +111,12 @@ module.exports = { return; } + if (describeKeyData.KeyMetadata && describeKeyData.KeyMetadata.KeySpec && describeKeyData.KeyMetadata.KeySpec !== 'SYMMETRIC_DEFAULT') { + noCmks = false; + helpers.addResult(results, 0, `Key rotation is not available for ${describeKeyData.KeyMetadata.KeySpec} key type`, region, kmsKey.KeyArn); + return; + } + var getKeyRotationStatus = helpers.addSource(cache, source, ['kms', 'getKeyRotationStatus', region, kmsKey.KeyId]); diff --git a/plugins/aws/kms/kmsKeyRotation.spec.js b/plugins/aws/kms/kmsKeyRotation.spec.js index 7635f60df6..e2997c8b4d 100644 --- a/plugins/aws/kms/kmsKeyRotation.spec.js +++ b/plugins/aws/kms/kmsKeyRotation.spec.js @@ -23,7 +23,8 @@ const describeKey = [ KeyState: "Enabled", KeyUsage: "ENCRYPT_DECRYPT", MultiRegion: false, - Origin: "AWS_KMS" + Origin: "AWS_KMS", + KeySpec: "HMAC_512", } }, { @@ -42,7 +43,8 @@ const describeKey = [ KeyState: "Enabled", KeyUsage: "ENCRYPT_DECRYPT", MultiRegion: false, - Origin: "AWS_KMS" + Origin: "AWS_KMS", + KeySpec: "SYMMETRIC_DEFAULT", } }, { @@ -61,7 +63,8 @@ const describeKey = [ KeyState: "PendingDeletion", KeyUsage: "ENCRYPT_DECRYPT", MultiRegion: false, - Origin: "AWS_KMS" + Origin: "AWS_KMS", + KeySpec: "SYMMETRIC_DEFAULT", } } ] @@ -250,5 +253,16 @@ describe('kmsKeyRotation', function () { done(); }); }); + + it('should not pass if key 
rotation is not available for KMS ', function (done) { + const cache = createCache([listKeys], describeKey[0], keyPolicy[0], keyRotationStatus[0]); + kmsKeyRotation.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(0); + expect(results[0].region).to.equal('us-east-1'); + expect(results[0].message).to.include('Key rotation is not available for HMAC_512 key type'); + done(); + }); + }); }); }); \ No newline at end of file diff --git a/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.js b/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.js index 41f1d634fd..bd6c0d9ed3 100644 --- a/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.js +++ b/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.js @@ -10,8 +10,8 @@ module.exports = { more_info: 'AWS Neptune database service releases engine version upgrades regularly to introduce software features, bug fixes, security patches and performance improvements. Enabling auto minor version upgrade feature ensures that minor engine upgrades are applied automatically to the instance during the maintenance window.', recommended_action: 'Modify Neptune database instance and enable automatic minor version upgrades feature.', link: 'https://docs.aws.amazon.com/neptune/latest/userguide/cluster-maintenance.html', - apis: ['Neptune:describeDBClusters'], - realtime_triggers: ['neptune:CreateDBCluster', 'neptune:DeleteDBCluster', 'neptune:ModifyDBCluster'], + apis: ['Neptune:describeDBInstances'], + realtime_triggers: ['neptune:CreateDBInstance', 'neptune:DeleteDBInstance', 'neptune:ModifyDBInstance'], run: function(cache, settings, callback) { var results = []; @@ -19,32 +19,42 @@ var regions = helpers.regions(settings); async.each(regions.neptune, function(region, rcb){ - var describeDBClusters = helpers.addSource(cache, source, - ['neptune', 'describeDBClusters', region]); + var describeDBInstances = helpers.addSource(cache, source, + ['neptune', 
'describeDBInstances', region]); - if (!describeDBClusters) return rcb(); + if (!describeDBInstances) return rcb(); - if (describeDBClusters.err || !describeDBClusters.data) { + if (describeDBInstances.err || !describeDBInstances.data) { helpers.addResult(results, 3, - `Unable to list Neptune database instances: ${helpers.addError(describeDBClusters)}`, region); + `Unable to list Neptune database cluster instances: ${helpers.addError(describeDBInstances)}`, region); return rcb(); } - if (!describeDBClusters.data.length) { + if (!describeDBInstances.data.length) { helpers.addResult(results, 0, 'No Neptune database instances found', region); return rcb(); } - for (let cluster of describeDBClusters.data) { - if (!cluster.DBClusterArn) continue; + var noInstance = true; - if (cluster.AutoMinorVersionUpgrade) { - helpers.addResult(results, 0, 'Neptune database instance has auto minor version upgrade enabled', cluster.DBClusterArn, region); + for (let instance of describeDBInstances.data) { + if (!instance.DBInstanceArn || instance.Engine !== 'neptune') continue; + + noInstance = false; + + if (instance.AutoMinorVersionUpgrade) { + helpers.addResult(results, 0, 'Neptune database instance has auto minor version upgrade enabled', region, instance.DBInstanceArn); } else { - helpers.addResult(results, 2, 'Neptune database instance does not have auto minor version upgrade enabled', cluster.DBClusterArn, region); + helpers.addResult(results, 2, 'Neptune database instance does not have auto minor version upgrade enabled', region, instance.DBInstanceArn); } } + + if (noInstance) { + helpers.addResult(results, 0, + 'No Neptune database instances found', region); + } + rcb(); }, function(){ callback(null, results, source); diff --git a/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.spec.js b/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.spec.js index 987b17e121..f6fc1f7ae7 100644 --- a/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.spec.js +++ 
b/plugins/aws/neptune/neptuneDBMinorVersionUpgrade.spec.js @@ -1,31 +1,27 @@ var expect = require('chai').expect; var neptuneDBMinorVersionUpgrade = require('./neptuneDBMinorVersionUpgrade'); -const describeDBClusters = [ +const describeDBInstances = [ { - "AllocatedStorage": 1, - "BackupRetentionPeriod": 1, - "DbClusterResourceId": "cluster-WNY2ZTZWH4RQ2CTKEEP4GVCPU4", - "DBClusterArn": "arn:aws:rds:us-east-1:000111222333:cluster:database-2", - "AssociatedRoles": [], - "AutoMinorVersionUpgrade": true + DBInstanceIdentifier: "db-neptune-1-instance-1", + Engine: "neptune", + AutoMinorVersionUpgrade: true, + DBClusterIdentifier: "db-neptune-1", + DBInstanceArn: "arn:aws:rds:us-east-1:12341234123:db:db-neptune-1-instance-1" }, { - "AllocatedStorage": 1, - "BackupRetentionPeriod": 1, - "DbClusterResourceId": "cluster-WNY2ZTZWH4RQ2CTKEEP4GVCPU9", - "DBClusterArn": "arn:aws:rds:us-east-1:000111222334:cluster:database-3", - "AssociatedRoles": [], - "AutoMinorVersionUpgrade": false + DBInstanceIdentifier: "db-neptune-1-instance-1", + Engine: "neptune", + AutoMinorVersionUpgrade: false, + DBClusterIdentifier: "db-neptune-1", + DBInstanceArn: "arn:aws:rds:us-east-1:12341234123:db:db-neptune-1-instance-1" } ]; - - const createCache = (clusters, clustersErr) => { return { neptune: { - describeDBClusters: { + describeDBInstances: { 'us-east-1': { err: clustersErr, data: clusters @@ -35,12 +31,10 @@ const createCache = (clusters, clustersErr) => { }; }; - - describe('neptuneDBMinorVersionUpgrade', function () { describe('run', function () { it('should PASS if Neptune database instance has auto minor version upgrade enabled', function (done) { - const cache = createCache([describeDBClusters[0]]); + const cache = createCache([describeDBInstances[0]]); neptuneDBMinorVersionUpgrade.run(cache, {}, (err, results) => { expect(results.length).to.equal(1); expect(results[0].status).to.equal(0); @@ -52,7 +46,7 @@ describe('neptuneDBMinorVersionUpgrade', function () { it('should FAIL if 
Neptune database instance does not have auto minor version upgrade enabled', function (done) { - const cache = createCache([describeDBClusters[1]]); + const cache = createCache([describeDBInstances[1]]); neptuneDBMinorVersionUpgrade.run(cache, {}, (err, results) => { expect(results.length).to.equal(1); expect(results[0].status).to.equal(2); @@ -62,7 +56,6 @@ describe('neptuneDBMinorVersionUpgrade', function () { }); }); - it('should PASS if no Neptune database instances found', function (done) { const cache = createCache([]); neptuneDBMinorVersionUpgrade.run(cache, {}, (err, results) => { @@ -75,7 +68,7 @@ describe('neptuneDBMinorVersionUpgrade', function () { }); it('should UNKNOWN if unable to list Neptune Database instances', function (done) { - const cache = createCache(null, { message: "Unable to list Neptune database instances" }); + const cache = createCache(null, { message: "Unable to list Neptune database cluster instances" }); neptuneDBMinorVersionUpgrade.run(cache, {}, (err, results) => { expect(results.length).to.equal(1); expect(results[0].status).to.equal(3); diff --git a/plugins/aws/neptune/neptuneDBMultiAz.js b/plugins/aws/neptune/neptuneDBMultiAz.js index c35a34675b..b6b8437666 100644 --- a/plugins/aws/neptune/neptuneDBMultiAz.js +++ b/plugins/aws/neptune/neptuneDBMultiAz.js @@ -8,10 +8,10 @@ module.exports = { severity: 'Medium', description: 'Ensure that AWS Neptune database instances are created to be cross-AZ for high availability.', more_info: 'Enabling Multi-AZ feature for Neptune instances boosts database reliability by automatically replicating data across multiple availability zones. 
This ensures continuous availability and minimal downtime for graph database deployments.', - recommended_action: 'Modify Neptune database instance to enable multi-AZ feature.', + recommended_action: 'Create new Neptune database instance and enable multi-AZ feature.', link: 'https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-db-clusters.html', apis: ['Neptune:describeDBClusters'], - realtime_triggers: ['neptune:CreateDBCluster', 'neptune:DeleteDBCluster'], + realtime_triggers: ['neptune:CreateDBCluster', 'neptune:DeleteDBCluster'], run: function(cache, settings, callback) { var results = []; @@ -43,12 +43,12 @@ module.exports = { let resource = cluster.DBClusterArn; if (cluster.MultiAZ) { - helpers.addResult(results, 0, 'Neptune database instance has multi-AZ enabled', region, resource); + helpers.addResult(results, 0, 'Neptune database instance has multi-AZ enabled', region, resource); } else { helpers.addResult(results, 2, 'Neptune database instance does not have multi-AZ enabled', region, resource); } } - + rcb(); }, function(){ callback(null, results, source); diff --git a/plugins/azure/loadbalancer/lbPublicIp.js b/plugins/azure/loadbalancer/lbPublicIp.js index ab780e7b98..823080183f 100644 --- a/plugins/azure/loadbalancer/lbPublicIp.js +++ b/plugins/azure/loadbalancer/lbPublicIp.js @@ -2,14 +2,14 @@ const async = require('async'); const helpers = require('../../../helpers/azure'); module.exports = { - title: 'Load Balancer Public IP', + title: 'Public Load Balancer', category: 'Load Balancer', domain: 'Availability', - description: 'Ensures that Azure Load Balancers have public IPs associated.', + description: 'Ensures that Azure Load Balancers are configured as public.', severity: 'Medium', - more_info: 'A public load balancer offers a dedicated IP for Internet-facing access to backend resources. This configuration facilitates efficient egress to the Internet for backend pool members through the assigned frontend IP. 
It ensures streamlined connectivity and reliable resource availability, simplifying scalability to meet varying demand levels.', - link: 'https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/configure-public-ip-load-balancer', - recommended_action: 'Modify load balancers and add Public IP address.', + more_info: 'To meet your organization\'s security compliance, ensure that load balancers are public to facilitate efficient egress to the Internet for backend pool members through the assigned frontend IP, ensuring streamlined connectivity and reliable resource availability.', + link: 'https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-overview', + recommended_action: 'Create the Load Balancer with IP associations as per your organization\'s requirements.', apis: ['loadBalancers:listAll'], realtime_triggers: ['microsoftnetwork:loadbalancers:write', 'microsoftnetwork:loadbalancers:delete'], @@ -42,9 +42,9 @@ module.exports = { if (lb.frontendIPConfigurations && lb.frontendIPConfigurations.length && lb.frontendIPConfigurations.some(ipconfig => ipconfig.properties && ipconfig.properties.publicIPAddress)) { - helpers.addResult(results, 0, 'Load Balancer has public IP associated', location, lb.id); + helpers.addResult(results, 0, 'Load Balancer is configured as public', location, lb.id); } else { - helpers.addResult(results, 2, 'Load Balancer does not have public IP associated', location, lb.id); + helpers.addResult(results, 2, 'Load Balancer is not configured as public', location, lb.id); } } rcb(); diff --git a/plugins/azure/loadbalancer/lbPublicIp.spec.js b/plugins/azure/loadbalancer/lbPublicIp.spec.js index ab21362f6c..4bc7f5f75d 100644 --- a/plugins/azure/loadbalancer/lbPublicIp.spec.js +++ b/plugins/azure/loadbalancer/lbPublicIp.spec.js @@ -104,7 +104,7 @@ describe('lbPublicIp', function() { const callback = (err, results) => { expect(results.length).to.equal(1); expect(results[0].status).to.equal(0); - 
expect(results[0].message).to.include('Load Balancer has public IP associated'); + expect(results[0].message).to.include('Load Balancer is configured as public'); expect(results[0].region).to.equal('eastus'); done() }; @@ -120,7 +120,7 @@ describe('lbPublicIp', function() { const callback = (err, results) => { expect(results.length).to.equal(1); expect(results[0].status).to.equal(2); - expect(results[0].message).to.include('Load Balancer does not have public IP associated'); + expect(results[0].message).to.include('Load Balancer is not configured as public'); expect(results[0].region).to.equal('eastus'); done() }; diff --git a/plugins/azure/postgresqlserver/flexibleServerLogDuration.js b/plugins/azure/postgresqlserver/flexibleServerLogDuration.js index d640bc6467..69d60c49a7 100644 --- a/plugins/azure/postgresqlserver/flexibleServerLogDuration.js +++ b/plugins/azure/postgresqlserver/flexibleServerLogDuration.js @@ -9,7 +9,7 @@ module.exports = { description: 'Ensures that connection duration logs are enabled for PostgreSQL flexible servers.', more_info: 'Enabling connection duration logs on PostgreSQL flexible servers allows for logging the duration of each completed SQL statement, aiding in performance monitoring, identifying long-running queries, and ensuring compliance with auditing requirements.', recommended_action: 'Ensure the server parameters for each PostgreSQL flexible servers have the log_duration setting enabled.', - link: 'https://learn.microsoft.com/en-us/azure/flexible-server/howto-configure-server-parameters-using-portal', + link: 'https://learn.microsoft.com/en-us/azure/flexible-server/how-to-configure-server-parameters-using-portal', apis: ['servers:listPostgresFlexibleServer', 'flexibleServersConfigurations:listByPostgresServer'], realtime_triggers: ['microsoftdbforpostgresql:flexibleservers:write','microsoftdbforpostgresql:flexibleservers:delete','microsoftdbforpostgresql:flexibleservers:configurations:write'], diff --git 
a/plugins/azure/sqlserver/auditOperationsEnabled.js b/plugins/azure/sqlserver/auditOperationsEnabled.js index 586531b1bb..b35b4f50b0 100644 --- a/plugins/azure/sqlserver/auditOperationsEnabled.js +++ b/plugins/azure/sqlserver/auditOperationsEnabled.js @@ -40,7 +40,7 @@ module.exports = { const devOpsAuditingSettings = helpers.addSource(cache, source, ['devOpsAuditingSettings', 'list', location, server.id]); - if (!devOpsAuditingSettings || devOpsAuditingSettings.err || !devOpsAuditingSettings.data) { + if (!devOpsAuditingSettings || devOpsAuditingSettings.err || !devOpsAuditingSettings.data || !devOpsAuditingSettings.data.length) { helpers.addResult(results, 3, 'Unable to query Auditing Policies: ' + helpers.addError(devOpsAuditingSettings), location, server.id); } else { @@ -50,10 +50,7 @@ module.exports = { } else { helpers.addResult(results, 2, 'Microsoft support operations auditing is not enabled on SQL server', location, server.id); } - } else { - helpers.addResult(results, 2, 'No existing auditing policies found', location, server.id); } - } }); @@ -62,4 +59,4 @@ module.exports = { callback(null, results, source); }); } -}; +}; \ No newline at end of file diff --git a/plugins/azure/storageaccounts/blobServiceLoggingEnabled.js b/plugins/azure/storageaccounts/blobServiceLoggingEnabled.js index 0cd050873f..a1f6853f72 100644 --- a/plugins/azure/storageaccounts/blobServiceLoggingEnabled.js +++ b/plugins/azure/storageaccounts/blobServiceLoggingEnabled.js @@ -38,30 +38,37 @@ module.exports = { for (let storageAccount of storageAccounts.data) { if (!storageAccount.id) continue; - const diagnosticSettings = helpers.addSource(cache, source, - ['diagnosticSettings', 'listByBlobServices', location, storageAccount.id]); - - if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { - helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); + if 
(storageAccount.sku && + storageAccount.sku.tier && + storageAccount.sku.tier.toLowerCase() == 'premium') { + helpers.addResult(results, 0, 'Storage Account tier is premium', location, storageAccount.id); } else { - //First consider that all the logs are missing then remove the ones that are present - var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; - diagnosticSettings.data.forEach(settings => { - const logs = settings.logs; - missingLogs = missingLogs.filter(requiredCategory => - !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) - ); - }); + const diagnosticSettings = helpers.addSource(cache, source, + ['diagnosticSettings', 'listByBlobServices', location, storageAccount.id]); + - if (missingLogs.length) { - helpers.addResult(results, 2, `Storage Account does not have logging enabled for blob service. Missing Logs ${missingLogs}`, location, storageAccount.id); + if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { + helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); } else { - helpers.addResult(results, 0, 'Storage Account has logging enabled for blob service read, write and delete requests', location, storageAccount.id); + //First consider that all the logs are missing then remove the ones that are present + var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; + + diagnosticSettings.data.forEach(settings => { + const logs = settings.logs; + missingLogs = missingLogs.filter(requiredCategory => + !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) + ); + }); + + if (missingLogs.length) { + helpers.addResult(results, 2, `Storage Account does not have logging enabled for blob service. 
Missing Logs ${missingLogs}`, location, storageAccount.id); + } else { + helpers.addResult(results, 0, 'Storage Account has logging enabled for blob service read, write and delete requests', location, storageAccount.id); + } } } - } rcb(); diff --git a/plugins/azure/storageaccounts/blobServiceLoggingEnabled.spec.js b/plugins/azure/storageaccounts/blobServiceLoggingEnabled.spec.js index 6eb476b4a0..4f4ae422c4 100644 --- a/plugins/azure/storageaccounts/blobServiceLoggingEnabled.spec.js +++ b/plugins/azure/storageaccounts/blobServiceLoggingEnabled.spec.js @@ -8,6 +8,16 @@ const storageAccounts = [ name: 'csb100320011e293683', type: 'Microsoft.Storage/storageAccounts', location: 'eastus', + }, + { + kind: 'StorageV2', + id: '/subscriptions/1234/resourceGroups/cloud-shell-storage-eastus/providers/Microsoft.Storage/storageAccounts/csb100320011e293683', + name: 'csb100320011e293683', + type: 'Microsoft.Storage/storageAccounts', + location: 'eastus', + sku: { + tier: 'Premium' + } } ]; @@ -171,6 +181,18 @@ describe('blobServiceLoggingEnabled', function () { }); }); + it('should PASS if storage account tier is premium', function (done) { + const cache = createCache([storageAccounts[1]], []); + blobServiceLoggingEnabled.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(0); + expect(results[0].region).to.equal('eastus'); + expect(results[0].message).to.equal('Storage Account tier is premium'); + + done(); + }); + }); + it('should UNKNOWN if Unable to query for for storage accounts', function (done) { const cache = createErrorCache('diagnostic'); blobServiceLoggingEnabled.run(cache, {}, (err, results) => { diff --git a/plugins/azure/storageaccounts/queueServiceLoggingEnabled.js b/plugins/azure/storageaccounts/queueServiceLoggingEnabled.js index ac33e6263f..a5eb3e0ca4 100644 --- a/plugins/azure/storageaccounts/queueServiceLoggingEnabled.js +++ b/plugins/azure/storageaccounts/queueServiceLoggingEnabled.js @@ -12,7 +12,7 
@@ module.exports = { link: 'https://learn.microsoft.com/en-us/azure/storage/queues/monitor-queue-storage?tabs=azure-portal', apis: ['storageAccounts:list', 'storageAccounts:listKeys','diagnosticSettings:listByQueueServices'], realtime_triggers: ['microsoftstorage:storageaccounts:write', 'microsoftstorage:storageaccounts:delete'], - + run: function(cache, settings, callback) { var results = []; var source = {}; @@ -39,26 +39,33 @@ module.exports = { if (!storageAccount.id) continue; - const diagnosticSettings = helpers.addSource(cache, source, - ['diagnosticSettings', 'listByQueueServices', location, storageAccount.id]); - - if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { - helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); + if (storageAccount.sku && + storageAccount.sku.tier && + storageAccount.sku.tier.toLowerCase() == 'premium') { + helpers.addResult(results, 0, 'Storage Account tier is premium', location, storageAccount.id); } else { - //First consider that all the logs are missing then remove the ones that are present - var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; - diagnosticSettings.data.forEach(settings => { - const logs = settings.logs; - missingLogs = missingLogs.filter(requiredCategory => - !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) - ); - }); + const diagnosticSettings = helpers.addSource(cache, source, + ['diagnosticSettings', 'listByQueueServices', location, storageAccount.id]); - if (missingLogs.length) { - helpers.addResult(results, 2, `Storage Account does not have logging enabled for queue service. 
Missing Logs ${missingLogs}`, location, storageAccount.id); + if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { + helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); } else { - helpers.addResult(results, 0, 'Storage Account has logging enabled for queue service read, write and delete requests', location, storageAccount.id); + //First consider that all the logs are missing then remove the ones that are present + var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; + + diagnosticSettings.data.forEach(settings => { + const logs = settings.logs; + missingLogs = missingLogs.filter(requiredCategory => + !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) + ); + }); + + if (missingLogs.length) { + helpers.addResult(results, 2, `Storage Account does not have logging enabled for queue service. 
Missing Logs ${missingLogs}`, location, storageAccount.id); + } else { + helpers.addResult(results, 0, 'Storage Account has logging enabled for queue service read, write and delete requests', location, storageAccount.id); + } } } } diff --git a/plugins/azure/storageaccounts/queueServiceLoggingEnabled.spec.js b/plugins/azure/storageaccounts/queueServiceLoggingEnabled.spec.js index 1baa3d7405..12c0b26261 100644 --- a/plugins/azure/storageaccounts/queueServiceLoggingEnabled.spec.js +++ b/plugins/azure/storageaccounts/queueServiceLoggingEnabled.spec.js @@ -18,6 +18,16 @@ const storageAccounts = [ primaryEndpoints: [Object], primaryLocation: 'eastus', statusOfPrimary: 'available' + }, + { + kind: 'StorageV2', + id: '/subscriptions/1234/resourceGroups/cloud-shell-storage-eastus/providers/Microsoft.Storage/storageAccounts/csb100320011e293683', + name: 'csb100320011e293683', + type: 'Microsoft.Storage/storageAccounts', + location: 'eastus', + sku: { + tier: 'Premium' + } } ]; @@ -183,6 +193,18 @@ describe('queueServiceLoggingEnabled', function () { }); }); + it('should PASS if storage account tier is premium', function (done) { + const cache = createCache([storageAccounts[1]], []); + queueServiceLoggingEnabled.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(0); + expect(results[0].region).to.equal('eastus'); + expect(results[0].message).to.equal('Storage Account tier is premium'); + + done(); + }); + }); + it('should UNKNOWN if Unable to query for for storage accounts', function (done) { const cache = createErrorCache('storageAccounts'); queueServiceLoggingEnabled.run(cache, {}, (err, results) => { diff --git a/plugins/azure/storageaccounts/tableServiceLoggingEnabled.js b/plugins/azure/storageaccounts/tableServiceLoggingEnabled.js index 4279f40dfa..074000d1a5 100644 --- a/plugins/azure/storageaccounts/tableServiceLoggingEnabled.js +++ b/plugins/azure/storageaccounts/tableServiceLoggingEnabled.js @@ -13,7 +13,7 
@@ module.exports = { link: 'https://learn.microsoft.com/en-us/azure/storage/tables/monitor-table-storage?tabs=azure-portal', apis: ['storageAccounts:list', 'storageAccounts:listKeys', 'diagnosticSettings:listByTableServices'], realtime_triggers: ['microsoftstorage:storageaccounts:write', 'microsoftstorage:storageaccounts:delete'], - + run: function(cache, settings, callback) { var results = []; var source = {}; @@ -38,27 +38,34 @@ module.exports = { for (let storageAccount of storageAccounts.data) { if (!storageAccount.id) continue; - const diagnosticSettings = helpers.addSource(cache, source, - ['diagnosticSettings', 'listByTableServices', location, storageAccount.id]); - - - if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { - helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); + if (storageAccount.sku && + storageAccount.sku.tier && + storageAccount.sku.tier.toLowerCase() == 'premium') { + helpers.addResult(results, 0, 'Storage Account tier is premium', location, storageAccount.id); } else { - //First consider that all the logs are missing then remove the ones that are present - var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; - diagnosticSettings.data.forEach(settings => { - const logs = settings.logs; - missingLogs = missingLogs.filter(requiredCategory => - !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) - ); - }); + const diagnosticSettings = helpers.addSource(cache, source, + ['diagnosticSettings', 'listByTableServices', location, storageAccount.id]); - if (missingLogs.length) { - helpers.addResult(results, 2, `Storage Account does not have logging enabled for table service. 
Missing Logs ${missingLogs}`, location, storageAccount.id); + + if (!diagnosticSettings || diagnosticSettings.err || !diagnosticSettings.data) { + helpers.addResult(results, 3, 'Unable to query Storage Account diagnostics settings: ' + helpers.addError(diagnosticSettings), location, storageAccount.id); } else { - helpers.addResult(results, 0, 'Storage Account has logging enabled for table service read, write and delete requests', location, storageAccount.id); + //First consider that all the logs are missing then remove the ones that are present + var missingLogs = ['StorageRead', 'StorageWrite','StorageDelete']; + + diagnosticSettings.data.forEach(settings => { + const logs = settings.logs; + missingLogs = missingLogs.filter(requiredCategory => + !logs.some(log => (log.category === requiredCategory && log.enabled) || log.categoryGroup === 'allLogs' && log.enabled) + ); + }); + + if (missingLogs.length) { + helpers.addResult(results, 2, `Storage Account does not have logging enabled for table service. 
Missing Logs ${missingLogs}`, location, storageAccount.id); + } else { + helpers.addResult(results, 0, 'Storage Account has logging enabled for table service read, write and delete requests', location, storageAccount.id); + } } } } diff --git a/plugins/azure/storageaccounts/tableServiceLoggingEnabled.spec.js b/plugins/azure/storageaccounts/tableServiceLoggingEnabled.spec.js index 8edf6763bb..c6da89bd60 100644 --- a/plugins/azure/storageaccounts/tableServiceLoggingEnabled.spec.js +++ b/plugins/azure/storageaccounts/tableServiceLoggingEnabled.spec.js @@ -18,6 +18,16 @@ const storageAccounts = [ primaryEndpoints: [Object], primaryLocation: 'eastus', statusOfPrimary: 'available' + }, + { + kind: 'StorageV2', + id: '/subscriptions/1234/resourceGroups/cloud-shell-storage-eastus/providers/Microsoft.Storage/storageAccounts/csb100320011e293683', + name: 'csb100320011e293683', + type: 'Microsoft.Storage/storageAccounts', + location: 'eastus', + sku: { + tier: 'Premium' + } } ]; @@ -183,6 +193,18 @@ describe('tableServiceLoggingEnabled', function () { }); }); + it('should PASS if storage account tier is premium', function (done) { + const cache = createCache([storageAccounts[1]], []); + tableServiceLoggingEnabled.run(cache, {}, (err, results) => { + expect(results.length).to.equal(1); + expect(results[0].status).to.equal(0); + expect(results[0].region).to.equal('eastus'); + expect(results[0].message).to.equal('Storage Account tier is premium'); + + done(); + }); + }); + it('should UNKNOWN if Unable to query for for storage accounts', function (done) { const cache = createErrorCache('storageAccounts'); tableServiceLoggingEnabled.run(cache, {}, (err, results) => {