
Block delete_bucket requests by an OBC account #8331


Merged: 1 commit, Sep 10, 2024
Changes from all commits
9 changes: 8 additions & 1 deletion src/endpoint/s3/s3_rest.js
@@ -230,9 +230,16 @@ async function authorize_request_policy(req) {
const account = req.object_sdk.requesting_account;
const account_identifier_name = req.object_sdk.nsfs_config_root ? account.name.unwrap() : account.email.unwrap();
const account_identifier_id = req.object_sdk.nsfs_config_root ? account._id : undefined;

// deny delete_bucket permissions from bucket_claim_owner accounts (accounts that were created by OBC from openshift/k8s)
// the OBC bucket can still be deleted by normal accounts, according to the access policy that is checked below
if (req.op_name === 'delete_bucket' && account.bucket_claim_owner) {
dbg.error(`delete bucket request by an OBC account is restricted. an attempt to delete bucket by account ${account_identifier_name}`);
throw new S3Error(S3Error.AccessDenied);
}

// @TODO: System owner as a construct should be removed - Temporary
const is_system_owner = Boolean(system_owner) && system_owner.unwrap() === account_identifier_name;
if (is_system_owner) return;
@jackyalbo (Contributor) commented on Sep 10, 2024:
Please move line 233 close to the check on line 245. I mean, put your code block before or after this check, not in the middle. Also, just as a side note, I would move the @TODO before the const...


const is_owner = (function() {
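For reference, here is how the block reads with the reviewer's suggested move applied, as reflected in the final diff above. This is a sketch assembled from the diff, not an authoritative copy of the file:

    const account_identifier_id = req.object_sdk.nsfs_config_root ? account._id : undefined;

    // deny delete_bucket permissions from bucket_claim_owner accounts (accounts created by OBC from openshift/k8s);
    // the OBC bucket can still be deleted by normal accounts, per the access policy checked below
    if (req.op_name === 'delete_bucket' && account.bucket_claim_owner) {
        dbg.error(`delete bucket request by an OBC account is restricted. an attempt to delete bucket by account ${account_identifier_name}`);
        throw new S3Error(S3Error.AccessDenied);
    }

    // @TODO: System owner as a construct should be removed - Temporary
    const is_system_owner = Boolean(system_owner) && system_owner.unwrap() === account_identifier_name;
    if (is_system_owner) return;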
59 changes: 51 additions & 8 deletions src/test/unit_tests/test_s3_ops.js
@@ -153,6 +153,48 @@ mocha.describe('s3_ops', function() {
const res = await s3.listBuckets({});
assert(!res.Buckets.find(bucket => bucket.Name === BKT1));
});

// Test that bucket deletion is not allowed for an OBC account
mocha.it('OBC account should not be allowed to delete a bucket', async function() {
// create the bucket to be deleted. use rpc to create it, which is the same flow the operator uses
await rpc_client.bucket.create_bucket({ name: "obc-bucket" });
// create an obc account that is the bucket_claim_owner of the bucket
const obc_account = await rpc_client.account.create_account({
name: "obc-account",
email: "[email protected]",
has_login: false,
s3_access: true,
bucket_claim_owner: "obc-bucket",
});
const obc_s3_client = new S3({
endpoint: coretest.get_http_address(),
credentials: {
accessKeyId: obc_account.access_keys[0].access_key.unwrap(),
secretAccessKey: obc_account.access_keys[0].secret_key.unwrap(),
},
forcePathStyle: true,
region: config.DEFAULT_REGION,
requestHandler: new NodeHttpHandler({
httpAgent: http_utils.get_unsecured_agent(coretest.get_http_address()),
}),
});
try {
await obc_s3_client.deleteBucket({ Bucket: "obc-bucket" });
assert.fail('expected AccessDenied error. bucket deletion should not be allowed for obc account');
} catch (err) {
assert.strictEqual(err.Code, 'AccessDenied');
assert.strictEqual(err.$metadata.httpStatusCode, 403);
}

try {
// bucket deletion should be allowed for regular accounts (not OBC)
await s3.deleteBucket({ Bucket: "obc-bucket" });
} catch (err) {
assert.fail('expected bucket deletion to be successful for regular accounts');
}
// cleanup
await rpc_client.account.delete_account({ email: "[email protected]" });
});
});
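Outside the unit-test harness, the same behavior can be exercised with the AWS SDK v3 aggregated S3 client against a NooBaa endpoint, mirroring the test above. A minimal sketch follows; the endpoint URL, region, credentials, bucket name, and helper name are placeholders and assumptions, not values taken from this PR:

    const { S3 } = require('@aws-sdk/client-s3');

    // client configured with the OBC account's keys (placeholders, for illustration only)
    const obc_s3 = new S3({
        endpoint: 'http://localhost:6001', // assumed NooBaa S3 endpoint
        forcePathStyle: true,
        region: 'us-east-1',
        credentials: {
            accessKeyId: '<obc-access-key>',
            secretAccessKey: '<obc-secret-key>',
        },
    });

    async function try_delete() {
        try {
            await obc_s3.deleteBucket({ Bucket: 'obc-bucket' });
            console.log('unexpected: delete succeeded');
        } catch (err) {
            // with this PR, an OBC (bucket_claim_owner) account should get AccessDenied / HTTP 403
            console.log(err.Code, err.$metadata && err.$metadata.httpStatusCode);
        }
    }

    try_delete();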

async function test_object_ops(bucket_name, bucket_type, caching, remote_endpoint_options) {
@@ -201,7 +243,7 @@ mocha.describe('s3_ops', function() {
const ENDPOINT_TYPE = USE_REMOTE_ENDPOINT ? process.env.ENDPOINT_TYPE : 'S3_COMPATIBLE';
const AWS_ACCESS_KEY_ID = USE_REMOTE_ENDPOINT ? process.env.AWS_ACCESS_KEY_ID : s3_client_params.credentials.accessKeyId;
const AWS_SECRET_ACCESS_KEY = USE_REMOTE_ENDPOINT ? process.env.AWS_SECRET_ACCESS_KEY :
s3_client_params.credentials.secretAccessKey;
coretest.log("before creating azure target bucket: ", source_namespace_bucket, target_namespace_bucket);
if (is_azure_mock) {
coretest.log("creating azure target bucket: ", source_namespace_bucket, target_namespace_bucket);
@@ -341,7 +383,7 @@ mocha.describe('s3_ops', function() {
s3.middlewareStack.add(next => args => {
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
return next(args);
}, { step: 'build', name: 'getFromCache' });
res = await s3.getObjectTagging(params_req);
s3.middlewareStack.remove('getFromCache');
} else {
@@ -404,7 +446,7 @@ mocha.describe('s3_ops', function() {
s3.middlewareStack.add(next => args => {
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
return next(args);
}, { step: 'build', name: 'getFromCache' });
res = await s3.getObjectTagging(params_req);
s3.middlewareStack.remove('getFromCache');
} else {
@@ -604,7 +646,7 @@ mocha.describe('s3_ops', function() {
CopySource: `/${bucket_name}/${text_file1}`,
});

const res = await s3.getObjectTagging({ Bucket: bucket_name, Key: text_file2 });

assert.strictEqual(res.TagSet.length, params.Tagging.TagSet.length, 'Should be 1');
assert.strictEqual(res.TagSet[0].Value, params.Tagging.TagSet[0].Value);
@@ -631,7 +673,7 @@ mocha.describe('s3_ops', function() {
Key: text_file2,
CopySource: `/${other_platform_bucket}/${text_file1}`,
});
const res = await s3.getObjectTagging({ Bucket: bucket_name, Key: text_file2 });
assert.strictEqual(res.TagSet.length, 1, 'Should be 1');
assert.strictEqual(res.TagSet[0].Value, params.Tagging.TagSet[0].Value);
assert.strictEqual(res.TagSet[0].Key, params.Tagging.TagSet[0].Key);
@@ -808,7 +850,7 @@ mocha.describe('s3_ops', function() {
s3.middlewareStack.add(next => args => {
args.request.query.get_from_cache = 'true'; // represents query_params = { get_from_cache: true }
return next(args);
}, { step: 'build', name: 'getFromCache' });
const res2 = await s3.listObjects(params_req);
s3.middlewareStack.remove('getFromCache');

@@ -825,7 +867,7 @@ mocha.describe('s3_ops', function() {
mocha.it('should delete non existing object without failing', async function() {
this.timeout(60000);
const key_to_delete = 'non-existing-obj';
const res = await s3.deleteObject({ Bucket: bucket_name, Key: key_to_delete });
const res_without_metadata = _.omit(res, '$metadata');
assert.deepEqual(res_without_metadata, {});

@@ -838,7 +880,8 @@ mocha.describe('s3_ops', function() {
{ Key: 'non-existing-obj2' },
{ Key: 'non-existing-obj3' }
];
const res = await s3.deleteObjects({
Bucket: bucket_name,
Delete: {
Objects: keys_to_delete
}