diff --git a/boot/setup.sh b/boot/setup.sh index 6796643..0a9ad49 100755 --- a/boot/setup.sh +++ b/boot/setup.sh @@ -193,6 +193,22 @@ function setup_rotation_cron { echo "JWT rotation cron job installed successfully" } +function manta_setup_poll_interval { + echo "Configuring UFDS_POLL_INTERVAL for authcache replicator" + local current_interval="" + current_interval=$(get_sapi_metadata UFDS_POLL_INTERVAL) + + if [[ -z "$current_interval" ]]; then + echo "Setting UFDS_POLL_INTERVAL to 2000ms (authcache hot path)" + if ! $SVC_ROOT/boot/set-sapi-metadata.sh UFDS_POLL_INTERVAL "2000"; then + echo "Warning: Failed to set UFDS_POLL_INTERVAL in SAPI" >&2 + echo "Replicator will use template default (2000ms)" >&2 + fi + else + echo "UFDS_POLL_INTERVAL already set to ${current_interval}ms" + fi +} + function manta_setup_auth { svccfg import $SVC_ROOT/smf/manifests/mahi.xml svcadm enable mahi @@ -240,6 +256,9 @@ if [[ ${FLAVOR} == "manta" ]]; then echo "Setting up JWT rotation cron job" setup_rotation_cron + echo "Setting up replicator poll interval" + manta_setup_poll_interval + # set up log rotation for mahiv2 first so logadm rotates logs properly manta_add_logadm_entry "mahi-replicator" manta_add_logadm_entry "mahi-server" diff --git a/docs/operator-endpoints.md b/docs/operator-endpoints.md new file mode 100644 index 0000000..8adc804 --- /dev/null +++ b/docs/operator-endpoints.md @@ -0,0 +1,97 @@ +# Operator Endpoints + +Mahi exposes two internal endpoints for direct Redis manipulation. +These are on the mahi server port (80) and are not exposed to +external S3 clients. + +## POST /cache-push/:accesskeyid + +Writes an access key directly to Redis, bypassing the replicator +poll interval (~2s). Called by CloudAPI after creating or updating +keys for immediate availability. 
+ +**Request body:** + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| accesskeysecret | string | yes | Secret access key | +| ownerUuid | string | yes | Owner account UUID | +| status | string | no | Key status (default: "Active") | +| scope | string\|null | no | Scope JSON string or null | + +**Behavior:** + +- Active keys are written in the unified format via the shared + builder (`redis-accesskey-format.js`), identical to the + replicator output. +- Inactive keys are removed from Redis (reverse lookup deleted, + key entry removed from user record). +- Uses `Date.now()` as the write version, which is always greater + than the replicator's changenumber. This ensures cachePush + always wins over a concurrent replicator write. +- Idempotent. Repeated calls overwrite the previous entry. +- Best-effort: failure is logged at warn level by the CloudAPI + caller but does not block the CloudAPI response. + +**Example:** + +``` +curl -X POST http://authcache.coal.joyent.us/cache-push/abc123 \ + -H 'Content-Type: application/json' \ + -d '{ + "accesskeysecret": "tdc_...", + "ownerUuid": "fe3617d8-...", + "status": "Active", + "scope": "{\"version\":1,\"permissions\":[{\"bucket\":\"my-bucket\",\"level\":\"read\"}]}" + }' +``` + +## POST /key-revoke/:accesskeyid + +Removes an access key from Redis immediately and writes a +revocation tombstone with a 24-hour TTL. The replicator checks +for the tombstone before writing a key — if present, the write +is skipped, making the revocation durable across replication +cycles. + +Use for emergency revocation of compromised keys without waiting +for the UFDS delete to propagate through the replicator. + +**Request body:** None required. + +**Behavior:** + +- Deletes the key entry from the user record in Redis. +- Deletes the reverse lookup at `/accesskey/:accesskeyid`. +- Writes a tombstone at `/revoked/:accesskeyid` with a 24-hour + TTL (`SETEX`). +- Repeated calls renew the tombstone TTL. 
+- Returns 404 if the key is not found in Redis. + +**Important:** Revocation is temporary. The replicator will +re-add the key on its next cycle once the tombstone expires +(24 hours) if the key still exists in UFDS. To permanently +revoke a key: + +1. Call `DELETE /:account/accesskeys/:id` via CloudAPI + (which deletes from UFDS and calls key-revoke automatically). +2. Or: call key-revoke for immediate effect, then delete + from UFDS within 24 hours. + +**Example:** + +``` +curl -X POST http://authcache.coal.joyent.us/key-revoke/abc123 +``` + +**Response:** + +```json +{ + "revoked": true, + "accessKeyId": "abc123", + "userUuid": "fe3617d8-...", + "tombstoneTtlSeconds": 86400, + "replicationWarning": "Key removed from Redis cache and revocation tombstone set (86400s TTL). Delete from UFDS via CloudAPI to permanently revoke." +} +``` diff --git a/lib/redis-accesskey-format.js b/lib/redis-accesskey-format.js new file mode 100644 index 0000000..d3ebfb2 --- /dev/null +++ b/lib/redis-accesskey-format.js @@ -0,0 +1,158 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ + +/* + * Copyright 2026 Edgecast Cloud LLC. + */ + +/* + * Canonical Redis entry builders for permanent access keys. + * + * Two code paths write permanent key data to Redis: + * 1. Replicator transforms (UFDS → Redis sync) + * 2. cachePush endpoint (CloudAPI → Redis shortcut) + * + * Both must produce identical structures. This module + * is the single source of truth for the Redis format + * so the invariant is enforced by construction, not by + * convention. 
+ * + * Permanent key format (in /uuid/{uuid}.accesskeys): + * { secret: string, scope: string|null } + * + * Reverse lookup format (in /accesskey/{keyId}): + * { type: "accesskey", accessKeyId, userUuid, + * credentialType: "permanent", scope: string|null } + * + * Scope values for permanent keys: + * null — key is unscoped (unrestricted access) + * JSON str — key is scoped (e.g. '{"version":1,...}') + * "" — preserved as-is; downstream parseScope + * returns null, causing fail-closed deny + * + * Scope values for STS temporary credentials (in sts.js, + * NOT built by this module): + * "none" — parent key was explicitly unscoped + * JSON str — inherited from scoped parent key + * null — legacy pre-sentinel temp credential + */ + + +/** + * @brief Build the accesskeys map entry for a permanent key + * + * Stored at /uuid/{ownerUuid}.accesskeys[accessKeyId]. + * + * @param {string} secret - Secret access key + * @param {string|null} scope - Scope JSON string or null + * @param {number} [version] - Write version (changenumber or + * 0 for unversioned). Used by the replicator and cachePush + * to prevent stale writes from overwriting newer data. + * @return {Object} { secret, scope, version } + */ +function buildPermanentKeyEntry(secret, scope, version) { + /* + * Warn on malformed scope JSON but store the value + * as-is. Downstream parseScope() will fail to parse + * it and enforceBucketScope will deny the request + * (fail-closed). We do NOT replace with null because + * null means "unrestricted" (fail-open). We do NOT + * throw because callers are in async callback chains + * without try/catch guards. + */ + if (scope != null && scope !== '') { + try { + JSON.parse(scope); + } catch (e) { + /* global console */ + console.error( + 'buildPermanentKeyEntry: invalid ' + + 'scope JSON (storing as-is, will ' + + 'fail-closed on enforcement): ' + + e.message); + } + } + return ({ + secret: secret, + scope: (scope != null) ? scope : null, + version: (version != null) ? 
version : 0 + }); +} + + +/** + * @brief Build the reverse-lookup entry for a permanent key + * + * Stored at /accesskey/{accessKeyId}. + * + * @param {string} accessKeyId - Access key ID + * @param {string} userUuid - Owner UUID + * @param {string|null} scope - Scope JSON string or null + * @param {number} [version] - Write version (see + * buildPermanentKeyEntry) + * @return {Object} Reverse-lookup entry + */ +function buildPermanentKeyLookup(accessKeyId, userUuid, scope, version) { + return ({ + type: 'accesskey', + accessKeyId: accessKeyId, + userUuid: userUuid, + credentialType: 'permanent', + scope: (scope != null) ? scope : null, + version: (version != null) ? version : 0 + }); +} + + +/* + * Revocation tombstone constants and helpers. + * + * When an operator calls POST /key-revoke/:accesskeyid, + * a tombstone key is written to Redis with a TTL. The + * replicator checks for the tombstone before writing a key + * to Redis — if present, the write is skipped, making the + * revocation durable across replication cycles. + * + * The tombstone auto-expires after REVOKE_TTL_SECONDS. + * The operator should delete the key from UFDS (via CloudAPI) + * before the tombstone expires to make revocation permanent. + * Repeated key-revoke calls renew the tombstone TTL. 
+ */ +var REVOKE_TTL_SECONDS = 86400; // 24 hours + + +/** + * @brief Build the Redis key path for a revocation tombstone + * + * @param {string} accesskeyid - Access key ID + * @return {string} Redis key path + */ +function revokedKeyPath(accesskeyid) { + return ('/revoked/' + accesskeyid); +} + + +/** + * @brief Build the revocation tombstone value + * + * @param {string} userUuid - Owner UUID of the revoked key + * @return {Object} Tombstone data + */ +function buildRevocationTombstone(userUuid) { + return ({ + revokedAt: Date.now(), + userUuid: userUuid + }); +} + + +module.exports = { + buildPermanentKeyEntry: buildPermanentKeyEntry, + buildPermanentKeyLookup: buildPermanentKeyLookup, + REVOKE_TTL_SECONDS: REVOKE_TTL_SECONDS, + revokedKeyPath: revokedKeyPath, + buildRevocationTombstone: buildRevocationTombstone +}; diff --git a/lib/replicator/main.js b/lib/replicator/main.js index 37c35ed..c83f7a7 100644 --- a/lib/replicator/main.js +++ b/lib/replicator/main.js @@ -96,6 +96,16 @@ function main() { level: process.env.LOG_LEVEL || 'info' }); + // Defensive: default to 2s if interval is missing or invalid. + // Only the authcache (manta) template uses {{UFDS_POLL_INTERVAL}}; + // SDC hardcodes 10000 in its template, so this path only fires + // on authcache where 2s is the correct value for the S3 hot path. 
+ if (!ufdsConfig.interval || (typeof (ufdsConfig.interval)) !== 'number') { + log.warn({configInterval: ufdsConfig.interval}, + 'ufds.interval missing or invalid in config, defaulting to 2000ms'); + ufdsConfig.interval = 2000; + } + ufdsConfig.url = opts['ufds-url'] || ufdsConfig.url; redisConfig.host = opts['redis-host'] || redisConfig.host; redisConfig.port = opts['redis-port'] || redisConfig.port; diff --git a/lib/replicator/transforms/accesskey.js b/lib/replicator/transforms/accesskey.js index c4bf196..17766dd 100644 --- a/lib/replicator/transforms/accesskey.js +++ b/lib/replicator/transforms/accesskey.js @@ -10,15 +10,18 @@ var assert = require('assert-plus'); var sprintf = require('util').format; +var akFormat = require('../../redis-accesskey-format'); function add(opts, cb) { assert.object(opts, 'opts'); assert.object(opts.changes, 'opts.changes'); + assert.object(opts.entry, 'opts.entry'); assert.object(opts.log, 'opts.log'); assert.object(opts.redis, 'opts.redis'); assert.func(cb, 'callback'); var changes = opts.changes; + var entry = opts.entry; var log = opts.log; var redis = opts.redis; @@ -69,6 +72,8 @@ function add(opts, cb) { var sessionToken = changes.sessiontoken ? changes.sessiontoken[0] : null; var principalUuid = changes.principaluuid ? changes.principaluuid[0] : null; var assumedRole = changes.assumedrole ? changes.assumedrole[0] : null; + var accessKeyScope = changes.accesskeyscope ? + changes.accesskeyscope[0] : null; // Skip expired temporary credentials during replication if (credentialType === 'temporary' && expiration) { @@ -84,6 +89,37 @@ } } + /* + * Check for a revocation tombstone before writing. + * If an operator called POST /key-revoke for this key, + * a tombstone exists in Redis with a TTL. While the + * tombstone is present, skip writing the key to Redis + * so the revocation is durable across replication cycles. + * + * DECISION: fail-closed on tombstone check errors. 
+ * If the Redis GET for the tombstone fails, we abort + * the replication for this entry rather than proceeding. + * Rationale: a revoked key re-appearing in Redis is a + * security violation; a delayed replication retry is + * not. The replicator's backoff/retry handles transient + * Redis errors. If this proves too aggressive under + * sustained Redis pressure, switch to fail-open (log + * warn, proceed with write). + */ + var revokedKey = akFormat.revokedKeyPath(accesskeyid); + redis.get(revokedKey, function (revErr, revoked) { + if (revErr) { + cb(revErr); + return; + } + if (revoked) { + log.info({ accesskeyid: accesskeyid }, + 'accesskey.add: skipping revoked key' + + ' (tombstone present)'); + cb(null, batch); + return; + } + redis.get(key, function (err, res) { if (err) { cb(err); @@ -93,6 +129,62 @@ function add(opts, cb) { var payload = res ? JSON.parse(res) : {}; payload.accesskeys = payload.accesskeys || {}; + /* + * Version check: skip the write if the existing entry + * has a higher version (e.g. written by cachePush). + * Absent version = 0. + * + * INVARIANT: version values are comparable by > only + * because cachePush uses Date.now() (~10^12) and the + * replicator uses changenumber (~10^3–10^6). cachePush + * always wins, which is correct since it carries the + * freshest data. If either domain changes, this + * comparison breaks. + */ + var changenumber = parseInt(entry.changenumber, 10); + // if the changenumber invariant is broken we need + // to know it. + assert.ok(Number.isFinite(changenumber) && + changenumber > 0, + 'changenumber must be > 0, received: ' + + entry.changenumber); + + /* + * Sanity check: changenumber should always be + * well below Date.now() (~10^12). If it ever + * exceeds 10^9 the version comparison with + * cachePush timestamps breaks silently. 
+ */ + if (changenumber > 1e9) { + log.error({ + changenumber: changenumber, + accesskeyid: accesskeyid + }, 'accesskey.add: changenumber exceeds' + + ' safety ceiling (version comparison' + + ' with cachePush may be broken)'); + } + + if (credentialType === 'permanent') { + var existingKeyData = + payload.accesskeys[accesskeyid]; + var existingVersion = 0; + if (existingKeyData && + typeof (existingKeyData) === 'object') { + existingVersion = + existingKeyData.version || 0; + } + if (existingVersion > changenumber) { + log.info({ + accesskeyid: accesskeyid, + existingVersion: existingVersion, + changenumber: changenumber + }, 'accesskey.add: skipping stale' + + ' write (existing version newer)'); + cb(null, batch); + return; + } + } + // Store enhanced credential data for temporary credentials if (credentialType === 'temporary') { payload.accesskeys[accesskeyid] = { @@ -104,8 +196,16 @@ function add(opts, cb) { assumedRole: assumedRole }; } else { - // Keep legacy format for permanent credentials - payload.accesskeys[accesskeyid] = accesskeysecret; + /* + * Permanent credential: always store as object + * with secret, scope, and version. Unscoped keys + * use scope: null. This unified format eliminates + * typeof branching in sigv4.js consumers. 
+ */ + payload.accesskeys[accesskeyid] = + akFormat.buildPermanentKeyEntry( + accesskeysecret, accessKeyScope, + changenumber); } batch.set(key, JSON.stringify(payload)); @@ -133,6 +233,11 @@ function add(opts, cb) { credentialData.assumedRole = assumedRole; } + // Preserve bucket scope from parent key + if (accessKeyScope) { + credentialData.bucketScope = accessKeyScope; + } + var credentialJson = JSON.stringify(credentialData); batch.set(accessKeyLookupKey, credentialJson); @@ -142,12 +247,26 @@ function add(opts, cb) { hasAssumedRole: !!assumedRole }, 'Storing full credential data for temporary credential'); } else { - // For permanent credentials, store just the UUID (legacy behavior) - batch.set(accessKeyLookupKey, uuid); + /* + * Permanent credential reverse lookup: always + * store JSON object with userUuid and scope. + * Unscoped keys use scope: null. + */ + var permData = + akFormat.buildPermanentKeyLookup( + accesskeyid, uuid, accessKeyScope, + changenumber); + batch.set(accessKeyLookupKey, + JSON.stringify(permData)); + log.debug({ + accesskeyid: accesskeyid, + hasScope: !!accessKeyScope + }, 'Storing permanent credential'); } log.debug({batch: batch.queue}, 'accesskey.add: done'); cb(null, batch); }); + }); // close tombstone check } @@ -198,6 +317,7 @@ function del(opts, cb) { function modify(opts, cb) { assert.object(opts, 'opts'); + assert.object(opts.entry, 'opts.entry'); assert.object(opts.redis, 'opts.redis'); assert.func(cb, 'callback'); @@ -205,6 +325,7 @@ function modify(opts, cb) { var redis = opts.redis; var changes = opts.changes; var modEntry = opts.modEntry; + var entry = opts.entry; var status = null; var batch = redis.multi(); @@ -240,17 +361,30 @@ function modify(opts, cb) { return; } + var scopeChanged = false; + var newScope = null; + for (var i = changes.length - 1; i >= 0; i -= 1) { if (changes[i].operation === 'replace' && changes[i].modification.type === 'status') { - status = changes[i].modification.vals[i]; + status = 
changes[i].modification.vals[0]; + } + if (changes[i].modification.type === 'accesskeyscope') { + scopeChanged = true; + if (changes[i].operation === 'replace' || + changes[i].operation === 'add') { + var rawVal = changes[i].modification.vals[0]; + newScope = (rawVal != null) ? rawVal : null; + } else if (changes[i].operation === 'delete') { + newScope = null; + } } } - // Mahi only needs to perform an update if status changes - if (!status) { + // Mahi needs to update if status or scope changes + if (!status && !scopeChanged) { log.debug({ changes: changes }, - 'Skipping non-status related change to accesskey'); + 'Skipping non-status/scope change to accesskey'); cb(null, batch); return; } @@ -266,6 +400,23 @@ function modify(opts, cb) { return; } + /* + * Check for a revocation tombstone before writing. + */ + var revokedKey = akFormat.revokedKeyPath(accesskeyid); + redis.get(revokedKey, function (revErr, revoked) { + if (revErr) { + cb(revErr); + return; + } + if (revoked) { + log.info({ accesskeyid: accesskeyid }, + 'accesskey.modify: skipping revoked key' + + ' (tombstone present)'); + cb(null, batch); + return; + } + redis.get(key, function _redisGet(err, res) { if (err) { cb(err); @@ -274,15 +425,71 @@ function modify(opts, cb) { var payload = res ? JSON.parse(res) : {}; - // If status is Active, add the key - if (status === 'Active') { + /* + * Version check: skip the write if the existing entry + * has a higher version (e.g. written by cachePush with + * a newer changenumber). + */ + var changenumber = parseInt(entry.changenumber, 10); + assert.ok(Number.isFinite(changenumber) && + changenumber > 0, + 'changenumber must be a positive integer, got: ' + + entry.changenumber); + if (changenumber > 1e9) { + log.error({ + changenumber: changenumber, + accesskeyid: accesskeyid + }, 'accesskey.modify: changenumber exceeds' + + ' safety ceiling (version comparison' + + ' with cachePush may be broken)'); + } + var existingKeyData = payload.accesskeys ? 
+ payload.accesskeys[accesskeyid] : null; + var existingVersion = 0; + if (existingKeyData && + typeof (existingKeyData) === 'object') { + existingVersion = + existingKeyData.version || 0; + } + if (existingVersion > changenumber) { + log.info({ + accesskeyid: accesskeyid, + existingVersion: existingVersion, + changenumber: changenumber + }, 'accesskey.modify: skipping stale' + + ' write (existing version newer)'); + cb(null, batch); + return; + } + + /* + * Determine the effective scope: if this modify event + * carries a scope change, use the new value; otherwise + * fall back to the current modEntry value (unchanged). + */ + var accessKeyScope = scopeChanged ? newScope : + (modEntry.accesskeyscope ? + modEntry.accesskeyscope[0] : null); + + // If status is Active (or scope-only change on an active key), + // update the key entry in Redis + if (status === 'Active' || (!status && scopeChanged)) { payload.accesskeys = payload.accesskeys || {}; - payload.accesskeys[accesskeyid] = accesskeysecret; + payload.accesskeys[accesskeyid] = + akFormat.buildPermanentKeyEntry( + accesskeysecret, accessKeyScope, + changenumber); batch.set(key, JSON.stringify(payload)); - // Add reverse lookup: access key ID -> user UUID - var activeLookup = sprintf('/accesskey/%s', accesskeyid); - batch.set(activeLookup, uuid); + // Add/update reverse lookup + var activeLookup = sprintf('/accesskey/%s', + accesskeyid); + var modData = + akFormat.buildPermanentKeyLookup( + accesskeyid, uuid, accessKeyScope, + changenumber); + batch.set(activeLookup, + JSON.stringify(modData)); log.debug({batch: batch.queue}, 'accesskeys.modify: done'); cb(null, batch); return; @@ -305,6 +512,7 @@ function modify(opts, cb) { cb(null, batch); return; }); + }); // close tombstone check } module.exports = { diff --git a/lib/server/redislib.js b/lib/server/redislib.js index 08658c6..fe5e2b6 100644 --- a/lib/server/redislib.js +++ b/lib/server/redislib.js @@ -559,6 +559,7 @@ function getUserByAccessKey(opts, cb) { */ 
var userUuid; var tempCredData = null; + var keyBucketScope = null; var isTemporaryCredential = accessKeyId.indexOf('MSTS') === 0 || accessKeyId.indexOf('MSAR') === 0; @@ -568,10 +569,12 @@ function getUserByAccessKey(opts, cb) { tempCredData = JSON.parse(lookupResult); userUuid = tempCredData.userUuid || tempCredData.principalUuid; + keyBucketScope = tempCredData.bucketScope || null; log.debug({ accessKeyId: accessKeyId, userUuid: userUuid, - hasAssumedRole: !!tempCredData.assumedRole + hasAssumedRole: !!tempCredData.assumedRole, + hasBucketScope: !!keyBucketScope }, 'getUserByAccessKey: parsed temp credential'); } catch (parseErr) { // Fallback: treat as plain UUID if parse fails @@ -581,8 +584,29 @@ function getUserByAccessKey(opts, cb) { }, 'getUserByAccessKey: failed to parse temp cred data'); userUuid = lookupResult; } + } else if (lookupResult.charAt(0) === '{') { + /* + * Scoped permanent credential — reverse lookup stores + * JSON with userUuid and scope fields. + */ + try { + var scopedData = JSON.parse(lookupResult); + userUuid = scopedData.userUuid; + keyBucketScope = scopedData.scope || null; + log.debug({ + accessKeyId: accessKeyId, + userUuid: userUuid, + hasScope: !!keyBucketScope + }, 'getUserByAccessKey: parsed scoped permanent key'); + } catch (scopeParseErr) { + log.warn({ + err: scopeParseErr, + accessKeyId: accessKeyId + }, 'getUserByAccessKey: failed to parse scoped key'); + userUuid = lookupResult; + } } else { - // Permanent credential - plain UUID + // Unscoped permanent credential - plain UUID userUuid = lookupResult; } @@ -654,7 +678,8 @@ function getUserByAccessKey(opts, cb) { tempCredData.assumedRole : null, isTemporaryCredential: isTemporaryCredential, sessionName: tempCredData ? 
- tempCredData.sessionName : null + tempCredData.sessionName : null, + bucketScope: keyBucketScope }; } else { /* @@ -673,7 +698,8 @@ function getUserByAccessKey(opts, cb) { tempCredData.assumedRole : null, isTemporaryCredential: isTemporaryCredential, sessionName: tempCredData ? - tempCredData.sessionName : null + tempCredData.sessionName : null, + bucketScope: keyBucketScope }; } diff --git a/lib/server/server.js b/lib/server/server.js index 7e7db3f..9e66a04 100644 --- a/lib/server/server.js +++ b/lib/server/server.js @@ -23,6 +23,7 @@ var sessionToken = require('./session-token'); var sts = require('./sts.js'); var ufds = require('ufds'); var uuid = require('uuid'); +var akFormat = require('../redis-accesskey-format'); var vasync = require('vasync'); var exec = require('child_process').exec; @@ -374,7 +375,40 @@ function Server(opts) { this.ufdsPool = new genericPool.Pool({ name: 'ufds', create: function (callback) { - var client = new ufds(ufdsConfig); + /* + * Disable ldapjs idle-disconnect. ufds defaults to + * idleTimeout:90000 which silently unbinds pooled + * connections after 90s idle; generic-pool has no + * testOnBorrow and hands dead clients to callers. Pool + * lifecycle (idleTimeoutMillis) handles teardown. + */ + var clientOpts = {}; + var k; + for (k in ufdsConfig) { + if (ufdsConfig.hasOwnProperty(k)) { + clientOpts[k] = ufdsConfig[k]; + } + } + clientOpts.idleTimeout = 0; + var client = new ufds(clientOpts); + /* + * Track liveness with _poolAlive. ufds does not expose a + * `connected` property, so the existing validate check + * (client.connected !== false) is always true. Listen for + * the close event — fired when the UFDS server drops the + * TCP connection — and mark the client dead so + * pool.acquire() skips it and creates a fresh connection + * instead. 
+ */ + client._poolAlive = true; + client.on('close', function () { + client._poolAlive = false; + }); + client.on('error', function (err) { + opts.log.warn({ err: err }, + 'UFDS pool client error'); + client._poolAlive = false; + }); var timeout = setTimeout(function () { callback(new Error('UFDS connection timeout')); }, ufdsConfig.connectTimeout || 5000); @@ -400,19 +434,19 @@ function Server(opts) { } }, validate: function (client) { - return (client && client.connected !== false); + return (client && client._poolAlive !== false); }, min: ufdsConfig.poolMin || 5, max: ufdsConfig.poolMax || 20, - acquireTimeoutMillis: ufdsConfig.poolTimeout || 3000, + acquireTimeoutMillis: ufdsConfig.poolTimeout || 30000, idleTimeoutMillis: ufdsConfig.idleTimeout || 300000 }); opts.log.info({ url: ufdsConfig.url, bindDN: ufdsConfig.bindDN, - poolMin: ufdsConfig.poolMin || 2, - poolMax: ufdsConfig.poolMax || 10 + poolMin: ufdsConfig.poolMin || 5, + poolMax: ufdsConfig.poolMax || 20 }, 'UFDS connection pool initialized for STS operations'); } catch (err) { @@ -820,11 +854,39 @@ function Server(opts) { // Build session secret config for JWT validation var secretConfig = buildSecretConfig(opts.sessionConfig); + /* + * Wrap the UFDS connection pool in a proxy that implements the + * .search() interface expected by sigv4.js. Each search acquires a + * client from the pool and releases it after the callback. 
+ */ + var ufdsProxy = null; + if (req.ufdsPool) { + ufdsProxy = { + search: function (base, searchOpts, cb) { + req.ufdsPool.acquire( + function (poolErr, client) { + if (poolErr) { + req.log.warn({err: poolErr}, + 'sigv4 ufds-proxy: pool' + + ' acquire failed'); + cb(poolErr); + return; + } + client.search(base, searchOpts, + function (err, res) { + req.ufdsPool.release(client); + cb(err, res); + }); + }); + } + }; + } + sigv4.verifySigV4({ req: req, log: req.log, redis: req.redis, - ufdsPool: req.ufdsPool, + ufds: ufdsProxy, secretConfig: secretConfig }, function (err, result) { if (err) { @@ -838,10 +900,354 @@ function Server(opts) { userUuid: result.user.uuid, assumedRole: result.assumedRole, principalUuid: result.principalUuid, - isTemporaryCredential: result.isTemporaryCredential, - signingKey: result.signingKey + isTemporaryCredential: + result.isTemporaryCredential, + signingKey: result.signingKey, + bucketScope: result.bucketScope || null + }); + next(); + }); + }); + + /** + * @brief Emergency access key revocation + * + * Removes an access key from Redis immediately, bypassing the UFDS + * replication delay. Use for emergency revocation of compromised keys. + * + * @param {string} req.params.accesskeyid - Key to revoke + * @return {Object} { revoked: true, accessKeyId } + */ + server.post({ + name: 'keyRevoke', + path: '/key-revoke/:accesskeyid' + }, function keyRevokeHandler(req, res, next) { + var accesskeyid = req.params.accesskeyid; + var log = req.log; + + if (!accesskeyid) { + next(new errors.MissingParameterError('accesskeyid')); + return; + } + + var lookupKey = '/accesskey/' + accesskeyid; + + req.redis.get(lookupKey, + function (err, lookupVal) { + if (err) { + next(new errors.RedisError(err)); + return; + } + if (!lookupVal) { + res.send(404, { + error: 'AccessKeyNotFound', + message: 'Access key not found' + ' in cache' + }); + next(); + return; + } + + /* + * Extract userUuid from the lookup value. 
+ * May be a JSON object or legacy UUID string. + */ + var userUuid; + if (lookupVal.charAt(0) === '{') { + try { + var parsed = JSON.parse(lookupVal); + userUuid = parsed.userUuid; + } catch (e) { + log.error({ + lookupKey: lookupKey, + lookupVal: lookupVal + }, 'key-revoke: corrupt lookup' + + ' value'); + next(new errors.InternalError('Corrupt access key data')); + return; + } + } else { + userUuid = lookupVal; + } + + if (!userUuid) { + next(new errors.InternalError( + 'Cannot determine user UUID')); + return; + } + + var userKey = '/uuid/' + userUuid; + req.redis.get(userKey, + function (userErr, userRes) { + if (userErr) { + next(new errors.RedisError(userErr)); + return; + } + + var batch = req.redis.multi(); + + if (userRes) { + var payload; + try { + payload = JSON.parse(userRes); + } catch (parseErr) { + log.error({ + err: parseErr, + userKey: userKey + }, 'key-revoke: corrupt user ' + + 'record in Redis'); + next(new errors.RedisError( + 'corrupt user record')); + return; + } + if (payload.accesskeys && + payload.accesskeys[accesskeyid]) { + delete payload.accesskeys[ + accesskeyid]; + batch.set(userKey, + JSON.stringify(payload)); + } + } + + batch.del(lookupKey); + + /* + * Write a revocation tombstone with a 24-hour TTL. The + * replicator checks for this key before writing — if present, + * the write is skipped. Repeated key-revoke calls renew the TTL + * (setex is idempotent). + */ + var revokedKey = + akFormat.revokedKeyPath(accesskeyid); + batch.setex(revokedKey, + akFormat.REVOKE_TTL_SECONDS, + JSON.stringify(akFormat. 
+ buildRevocationTombstone(userUuid))); + + batch.exec(function (execErr) { + if (execErr) { + next(new errors.RedisError( + execErr)); + return; + } + + log.info({ + accesskeyid: accesskeyid, + userUuid: userUuid, + tombstoneTtl: akFormat.REVOKE_TTL_SECONDS + }, 'Emergency scope revocation' + + ' completed (tombstone set)'); + + res.send(200, { + revoked: true, + accessKeyId: accesskeyid, + userUuid: userUuid, + tombstoneTtlSeconds: + akFormat.REVOKE_TTL_SECONDS, + replicationWarning: + 'Key removed from Redis cache' + + ' and revocation tombstone set' + + ' (' + akFormat.REVOKE_TTL_SECONDS + + 's TTL). Repeated calls renew' + + ' the tombstone. Delete from' + + ' UFDS via CloudAPI to' + + ' permanently revoke.' + }); + next(); + }); + }); + }); + }); + + /** + * @brief Write-through cache push for access keys + * + * Writes an access key directly to Redis, bypassing the UFDS replication + * delay. Called by CloudAPI after creating or updating a key in UFDS. + * + * The Redis format matches the replicator output (transforms/accesskey.js) + * exactly: + * + * /uuid//accesskeys[id] = {secret, scope} /accesskey/ = JSON + * {type, accessKeyId, userUuid, credentialType, scope} + * + * @param {string} req.params.accesskeyid @param {Object} req.body + * accesskeysecret: string (required) ownerUuid: string (required) status: + * string (default 'Active') scope: string|null (accesskeyscope JSON) + * @return {Object} { pushed: true, accessKeyId } + */ + server.post({ + name: 'cachePush', + path: '/cache-push/:accesskeyid' + }, function cachePushHandler(req, res, next) { + var accesskeyid = req.params.accesskeyid; + var body = req.body || {}; + var log = req.log; + + if (!accesskeyid) { + next(new errors.MissingParameterError('accesskeyid')); + return; + } + + var secret = body.accesskeysecret; + var ownerUuid = body.ownerUuid; + var status = body.status || 'Active'; + var scope = (body.scope != null) ? 
body.scope : null; + + /* + * cachePush uses Date.now() as the version because UFDS does not return + * the changenumber to CloudAPI callers. Date.now() (~1.7 trillion) is + * always larger than any UFDS changenumber (sequential integer), so + * cachePush always wins over the replicator — which is correct since + * cachePush is called immediately after the UFDS write, carrying the + * freshest data. + */ + var version = Date.now(); + + if (!secret || !ownerUuid) { + res.send(400, { + error: 'InvalidArgument', + message: 'accesskeysecret and' + ' ownerUuid are required' }); next(); + return; + } + + /* + * If key is not Active, delegate to removal + * (same logic as key-revoke). + */ + if (status !== 'Active') { + var lookupKey = '/accesskey/' + accesskeyid; + var userKey = '/uuid/' + ownerUuid; + + req.redis.get(userKey, + function (getErr, userRes) { + if (getErr) { + next(new errors.RedisError(getErr)); + return; + } + + var batch = req.redis.multi(); + + if (userRes) { + var p; + try { + p = JSON.parse(userRes); + } catch (parseErr) { + log.error({ + err: parseErr, + userKey: userKey + }, 'key-revoke: corrupt user ' + 'record in Redis'); + next(new errors.RedisError('corrupt user record')); + return; + } + if (p.accesskeys && + p.accesskeys[accesskeyid]) { + delete p.accesskeys[accesskeyid]; + batch.set(userKey, JSON.stringify(p)); + } + } + + batch.del(lookupKey); + + batch.exec(function (execErr) { + if (execErr) { + next(new errors.RedisError(execErr)); + return; + } + + log.info({ + accesskeyid: accesskeyid, + ownerUuid: ownerUuid, + status: status + }, 'cache-push: removed inactive' + ' key from Redis'); + + res.send(200, { + pushed: true, + accessKeyId: accesskeyid, + action: 'removed' + }); + next(); + }); + }); + return; + } + + /* + * Active key: write to Redis in the same format + * as replicator transforms/accesskey.js + */ + var userKey2 = '/uuid/' + ownerUuid; + + req.redis.get(userKey2, + function (err, userRes) { + if (err) { + next(new 
errors.RedisError(err)); + return; + } + + var payload; + if (userRes) { + try { + payload = JSON.parse(userRes); + } catch (parseErr) { + log.error({ + err: parseErr, + userKey: userKey2 + }, 'cache-push: corrupt user ' + + 'record in Redis'); + next(new errors.RedisError( + 'corrupt user record')); + return; + } + } else { + payload = {}; + } + payload.accesskeys = + payload.accesskeys || {}; + + /* + * Permanent credential: unified format + * via shared builder (same as replicator). + */ + payload.accesskeys[accesskeyid] = + akFormat.buildPermanentKeyEntry( + secret, scope, version); + + var batch = req.redis.multi(); + batch.set(userKey2, JSON.stringify(payload)); + + /* + * Reverse lookup: permanent credential + * via shared builder. + */ + var lookupData = + akFormat.buildPermanentKeyLookup( + accesskeyid, ownerUuid, scope, + version); + batch.set('/accesskey/' + accesskeyid, + JSON.stringify(lookupData)); + + batch.exec(function (execErr) { + if (execErr) { + next(new errors.RedisError( + execErr)); + return; + } + + log.info({ + accesskeyid: accesskeyid, + ownerUuid: ownerUuid, + hasScope: !!scope + }, 'cache-push: key written to Redis'); + + res.send(200, { + pushed: true, + accessKeyId: accesskeyid, + action: 'written' + }); + next(); + }); }); }); diff --git a/lib/server/sigv4.js b/lib/server/sigv4.js index 52817cc..5d8c6fc 100644 --- a/lib/server/sigv4.js +++ b/lib/server/sigv4.js @@ -420,6 +420,340 @@ function calculateSignature(secretKey, dateStamp, region, service, }); } +/** + * @brief Verify a SigV4 signature given a secret key + * + * Shared helper used by the Redis path, the UFDS + * permanent-key fallback, and the UFDS temporary- + * credential fallback. Validates timestamp freshness, + * builds the canonical request, and compares the + * computed signature against the client's signature. 
+ * + * @param {Object} authInfo - Parsed Authorization header + * @param {string} secretKey - The access key secret + * @param {Object} req - HTTP request (headers, query) + * @param {Object} log - Bunyan logger + * @return {Object} { err, signingKey } — err is null + * on success, an Error on failure + */ +function verifySigV4Signature(authInfo, secretKey, req, log) { + var timestamp = req.headers['x-amz-date'] || + req.headers.date; + if (!timestamp) { + return ({ + err: new errors.InvalidSignatureError( + 'Missing timestamp'), + signingKey: null + }); + } + + var requestTime = + parseISO8601Basic(timestamp).getTime(); + var currentTime = Date.now(); + var timeDiff = Math.abs(currentTime - requestTime); + + if (requestTime > Y2038_THRESHOLD_MS) { + log.warn({ + requestTimestamp: timestamp, + requestTimeMs: requestTime, + systemTimeMs: currentTime, + timeDiff: timeDiff + }, 'Y2038: Request timestamp beyond Y2038 ' + + 'threshold.'); + } + + if (timeDiff > 15 * 60 * 1000) { + return ({ + err: new errors.InvalidSignatureError( + 'Request timestamp too old'), + signingKey: null + }); + } + + var originalMethod = + req.query.method || req.method; + var originalUrl = + req.query.url || req.url; + var uri = originalUrl.split('?')[0]; + if (req.query.url) { + originalUrl = + decodeURIComponent(originalUrl); + uri = decodeURIComponent(uri); + } + + var queryString = + originalUrl.split('?')[1] || ''; + var payloadHash = + req.headers['x-amz-content-sha256'] || + 'UNSIGNED-PAYLOAD'; + + var canonicalRequest = createCanonicalRequest( + originalMethod, uri, queryString, + req.headers, authInfo.signedHeaders, + payloadHash); + + var credentialScope = sprintf( + '%s/%s/%s/aws4_request', + authInfo.dateStamp, authInfo.region, + authInfo.service); + var stringToSign = createStringToSign( + timestamp, credentialScope, + canonicalRequest); + + var sigResult = calculateSignature( + secretKey, authInfo.dateStamp, + authInfo.region, authInfo.service, + stringToSign); + + if 
(sigResult.signature !== authInfo.signature) { + log.debug({ + expected: sigResult.signature, + received: authInfo.signature, + stringToSign: stringToSign, + canonicalRequest: canonicalRequest + }, 'Signature mismatch'); + return ({ + err: new errors.InvalidSignatureError( + 'Signature mismatch'), + signingKey: null + }); + } + + return ({ + err: null, + signingKey: sigResult.signingKey + }); +} + + +/* + * Negative cache for permanent key IDs not found in + * UFDS. Prevents an attacker from forcing UFDS LDAP + * queries by sending garbage key IDs. Entries expire + * after NEGATIVE_CACHE_TTL_MS. + * + * FALSE-NEGATIVE RISK: A key is added to this cache when + * it is absent from Redis and no UFDS client is passed to + * verifySigV4 (opts.ufds is null). When UFDS IS available, + * the read-through path is used instead and the negative + * cache is bypassed. If the key subsequently arrives in + * Redis (via replication or cache-push), the cache entry + * will still cause 401s for up to NEGATIVE_CACHE_TTL_MS + * (30 seconds). + * + * This can happen when: + * 1. A key is created in UFDS but the replicator is lagging. + * 2. A request arrives before the key reaches Redis. + * 3. The key later replicates to Redis. + * 4. For the next ~30 s, the cache rejects the valid key. + * + * Mitigation: CloudAPI calls POST /cache-push immediately + * after key creation so the key is in Redis before the + * caller's first request. Do NOT skip the cache-push call + * for newly created permanent keys. + */ +var NEGATIVE_CACHE_TTL_MS = 30000; +var NEGATIVE_CACHE_MAX_SIZE = 10000; +var NEGATIVE_CACHE_SWEEP_MS = 60000; +var negativeKeyCache = {}; +var negativeCacheSize = 0; + +/* + * Periodic sweep: evict expired entries every 60s instead + * of resetting the entire cache at the size limit. This + * avoids the cliff behavior where a full cache is emptied + * in one shot, causing a burst of UFDS queries. 
+ */ +setInterval(function sweepNegativeCache() { + var now = Date.now(); + var keys = Object.keys(negativeKeyCache); + for (var i = 0; i < keys.length; i++) { + if (now - negativeKeyCache[keys[i]] > + NEGATIVE_CACHE_TTL_MS) { + delete negativeKeyCache[keys[i]]; + negativeCacheSize--; + } + } +}, NEGATIVE_CACHE_SWEEP_MS).unref(); + +/** + * @brief Check if a key ID is in the negative cache + * + * @param {string} keyId + * @return {boolean} true if recently not-found + */ +function isNegativelyCached(keyId) { + var entry = negativeKeyCache[keyId]; + if (!entry) { + return (false); + } + if (Date.now() - entry > NEGATIVE_CACHE_TTL_MS) { + /* + * Expired — treat as not cached. Do NOT delete + * here; the periodic sweep handles eviction and + * counter decrement. Deleting in both places + * causes counter drift (double-decrement). + */ + return (false); + } + return (true); +} + +/** + * @brief Add a key ID to the negative cache + * + * If the cache exceeds NEGATIVE_CACHE_MAX_SIZE, new + * entries are not added (the periodic sweep will free + * space). This bounds memory without the cliff-reset + * behavior that previously emptied the entire cache. + * + * @param {string} keyId + */ +var _lastCacheFullWarn = 0; +function addToNegativeCache(keyId) { + if (negativeCacheSize >= NEGATIVE_CACHE_MAX_SIZE) { + var now = Date.now(); + if (now - _lastCacheFullWarn > 60000) { + _lastCacheFullWarn = now; + /* global console */ + console.error( + 'sigv4: negative cache full (%d ' + + 'entries), dropping new entries — ' + + 'possible sustained garbage-key attack', + negativeCacheSize); + } + return; + } + negativeKeyCache[keyId] = Date.now(); + negativeCacheSize++; +} + + +/** + * @brief UFDS read-through for permanent keys + * + * Called when a permanent key is not found in Redis + * (replication lag). Searches UFDS directly, verifies + * the SigV4 signature, and returns the same result + * shape as the Redis path. 
+ * + * @param {Object} authInfo - Parsed auth header + * @param {Object} req - HTTP request + * @param {Object} log - Bunyan logger + * @param {Object} redis - Redis client (unused, for + * interface compat) + * @param {Object} ufds - UFDS client + * @param {Function} cb - callback(err, result) + */ +function handlePermanentCredentialUfds( + authInfo, req, log, redis, ufds, cb) { + + /* + * Validate access key ID format before using it in + * an LDAP filter. Access key IDs are hex strings + * (32 chars for permanent keys). Reject anything + * that contains LDAP special characters to prevent + * filter injection. + */ + if (!/^[a-zA-Z0-9_-]+$/.test(authInfo.accessKeyId)) { + cb(new errors.InvalidSignatureError( + 'Invalid access key format')); + return; + } + + if (isNegativelyCached(authInfo.accessKeyId)) { + cb(new errors.InvalidSignatureError( + 'Invalid access key')); + return; + } + + var searchBase = 'ou=users, o=smartdc'; + var searchFilter = + '(&(objectclass=accesskey)(accesskeyid=' + + authInfo.accessKeyId + + ')(status=Active))'; + + ufds.search(searchBase, { + scope: 'sub', + filter: searchFilter + }, function (searchErr, searchRes) { + if (searchErr) { + /* + * UFDS unreachable — return 503 so the + * client retries, rather than masking + * the outage as "key not found." 
+ */ + log.error({ + err: searchErr, + accessKeyId: authInfo.accessKeyId + }, 'sigv4.ufds-fallback: UFDS search' + + ' failed'); + cb(new errors.ReplicatorNotReadyError()); + return; + } + + if (!searchRes || searchRes.length === 0) { + log.warn({ + accessKeyId: authInfo.accessKeyId + }, 'sigv4.ufds-fallback: key not found' + + ' in UFDS — genuinely invalid'); + addToNegativeCache(authInfo.accessKeyId); + cb(new errors.InvalidSignatureError( + 'Invalid access key')); + return; + } + + var cred = searchRes[0]; + var credData = cred.object || cred; + var secretKey = credData.accesskeysecret; + var ownerUuid = credData._owner; + var bucketScope = credData.accesskeyscope || + null; + + if (Array.isArray(secretKey)) { + secretKey = secretKey[0]; + } + if (Array.isArray(ownerUuid)) { + ownerUuid = ownerUuid[0]; + } + if (Array.isArray(bucketScope)) { + bucketScope = bucketScope[0]; + } + + if (!secretKey || !ownerUuid) { + log.error({ + accessKeyId: authInfo.accessKeyId, + hasSecret: !!secretKey, + hasOwner: !!ownerUuid + }, 'sigv4.ufds-fallback: incomplete' + + ' UFDS entry'); + cb(new errors.InvalidSignatureError( + 'Incomplete access key data')); + return; + } + + var result = verifySigV4Signature( + authInfo, secretKey, req, log); + if (result.err) { + cb(result.err); + return; + } + + log.info({ + accessKeyId: authInfo.accessKeyId, + ownerUuid: ownerUuid + }, 'sigv4.ufds-fallback: verification' + + ' successful (read-through)'); + + cb(null, buildPermanentResult( + { uuid: ownerUuid }, + authInfo.accessKeyId, + result.signingKey, + bucketScope)); + }); +} + + /** * @brief Handle temporary credential verification for STS * @@ -533,7 +867,21 @@ function handleTemporaryCredential(authInfo, sessionToken, req, log, ufds, cb) { return; } - var user = JSON.parse(userRes); + var user; + try { + user = JSON.parse(userRes); + } catch (parseErr) { + log.error({ + err: parseErr, + principalUuid: principalUuid, + userRes: userRes ? 
+ userRes.substring(0, 100) : 'null' + }, 'Failed to parse principal user' + + ' data from Redis'); + cb(new errors.InvalidSignatureError( + 'Corrupt principal user data')); + return; + } // Verify signature using the temporary credential's secret key var secretKey = credData.accesskeysecret; @@ -639,18 +987,16 @@ function handleTemporaryCredential(authInfo, sessionToken, req, log, ufds, cb) { } // Return result with role information - var result = { - user: user, + var result = buildTemporaryResult({ accessKeyId: authInfo.accessKeyId, userUuid: principalUuid, - valid: true, - // Additional fields for role-based access - isTemporaryCredential: true, + user: user, assumedRole: credData.assumedrole, principalUuid: principalUuid, - credentialType: 'temporary', - signingKey: sigResult.signingKey - }; + signingKey: sigResult.signingKey, + bucketScope: (credData.accesskeyscope != null) + ? credData.accesskeyscope : null + }); log.debug({ accessKeyId: authInfo.accessKeyId, @@ -666,12 +1012,388 @@ function handleTemporaryCredential(authInfo, sessionToken, req, log, ufds, cb) { }); } +/** + * @brief Build the standard result object for temporary credential + * verification. + * + * Shared by handleTemporaryCredentialRedis and handleTemporaryCredential (UFDS) + * to prevent divergence in result construction in the permanent credential + * path. 
+ * @param {Object} opts + * @param {string} opts.accessKeyId + * @param {string} [opts.secretAccessKey] + * @param {string} opts.userUuid + * @param {Object} opts.user - User object + * @param {Object|string} [opts.assumedRole] + * @param {string} [opts.principalUuid] + * @param {string} [opts.expiration] + * @param {Buffer} opts.signingKey + * @param {string|null} [opts.bucketScope] + * @return {Object} Verification result + */ +function buildTemporaryResult(opts) { + return ({ + accessKeyId: opts.accessKeyId, + secretAccessKey: opts.secretAccessKey || null, + userUuid: opts.userUuid, + user: opts.user || { uuid: opts.userUuid }, + account: { uuid: opts.userUuid }, + isTemporary: true, + isTemporaryCredential: true, + assumedRole: opts.assumedRole || null, + principalUuid: opts.principalUuid || opts.userUuid, + credentialType: 'temporary', + expiration: opts.expiration || null, + signingKey: opts.signingKey, + bucketScope: (opts.bucketScope != null) ? opts.bucketScope : null + }); +} + + +/** + * @brief Build the standard result object for permanent credential + * verification. + * + * Shared by handlePermanentCredentialRedis and handlePermanentCredentialUfds to + * prevent divergence in result construction. + * + * @param {Object} user - User object (full from Redis or + * {uuid} stub from UFDS) + * @param {string} accessKeyId - Verified access key ID + * @param {Buffer} signingKey - Derived signing key + * @param {string|null} bucketScope - Scope JSON string or null + * @return {Object} Verification result + */ +function buildPermanentResult(user, accessKeyId, signingKey, bucketScope) { + return ({ + user: user, + accessKeyId: accessKeyId, + signingKey: signingKey, + bucketScope: (bucketScope != null) ? bucketScope : null + }); +} + + +/** + * @brief Handle permanent credential verification via Redis + * + * Extracted from verifySigV4() to reduce indentation and isolate + * the Redis-based permanent key verification path. + * + * 1. 
Reverse-lookup /accesskey/{id} to get userUuid + * 2. Fetch /uuid/{uuid} to get user record with accesskeys map + * 3. Extract secret + scope from key data (object or legacy string) + * 4. Verify SigV4 signature + * 5. Return result via buildPermanentResult() + * + * On Redis miss, delegates to handlePermanentCredentialUfds() + * or the negative cache. + * + * @param {Object} authInfo - Parsed auth header + * @param {Object} req - HTTP request + * @param {Object} log - Bunyan logger + * @param {Object} redis - Redis client + * @param {Object|null} ufds - UFDS client (null when unavailable) + * @param {Function} cb - callback(err, result) + */ +function handlePermanentCredentialRedis( + authInfo, req, log, redis, ufds, cb) { + + var accessKeyLookupKey = sprintf('/accesskey/%s', + authInfo.accessKeyId); + redis.get(accessKeyLookupKey, function (err, lookupVal) { + if (err) { + cb(new errors.RedisError(err)); + return; + } + + if (!lookupVal) { + /* + * Key not in Redis. This happens + * when a key was just created in + * UFDS but the replicator has not + * synced it yet (~2s). Fall through + * to a direct UFDS lookup so the + * first request succeeds without + * requiring a client retry. + */ + if (!ufds) { + addToNegativeCache( + authInfo.accessKeyId); + cb(new errors.InvalidSignatureError( + 'Invalid access key')); + return; + } + log.info({ + accessKeyId: authInfo.accessKeyId + }, 'sigv4.verify: permanent key' + + ' not in Redis, trying UFDS' + + ' read-through'); + handlePermanentCredentialUfds( + authInfo, req, log, + redis, ufds, cb); + return; + } + + /* + * Reverse-lookup values are always JSON + * objects with a userUuid field. Legacy + * plain-UUID format is no longer written + * but tolerated for in-flight keys that + * have not been re-replicated yet. 
+ */ + var userUuid; + if (lookupVal.charAt(0) === '{') { + try { + var parsed = JSON.parse(lookupVal); + userUuid = parsed.userUuid; + } catch (e) { + cb(new errors.InvalidSignatureError( + 'Corrupt access key lookup')); + return; + } + } else { + userUuid = lookupVal; + } + + if (!userUuid) { + cb(new errors.InvalidSignatureError( + 'Invalid access key data')); + return; + } + + var userKey = sprintf('/uuid/%s', userUuid); + redis.get(userKey, function (userErr, userRes) { + if (userErr) { + cb(new errors.RedisError(userErr)); + return; + } + + if (!userRes) { + cb(new errors.InvalidSignatureError( + 'User not found')); + return; + } + + var user; + try { + user = JSON.parse(userRes); + } catch (parseErr) { + log.error({ + err: parseErr, + userUuid: userUuid, + userRes: userRes ? + userRes.substring(0, 100) : + 'null' + }, 'Failed to parse user data' + + ' from Redis'); + cb(new errors.InvalidSignatureError( + 'Corrupt user data')); + return; + } + if (!user.accesskeys || + !user.accesskeys[authInfo.accessKeyId]) { + cb(new errors.InvalidSignatureError( + 'Access key not found')); + return; + } + + /* + * Extract secret key and optional + * bucket scope from access key data. + * + * The new replicator writes all permanent + * keys as objects: + * { secret: "...", scope: } + * + * The string fallback below handles the + * pre-upgrade Redis format where permanent + * keys were stored as bare secret strings. + * During a rolling upgrade, existing keys + * remain as strings until the replicator + * re-writes them (on status toggle, scope + * change, or key rotation). Remove this + * branch once all deployments have been + * upgraded and keys re-replicated. 
+ */ + var keyData = + user.accesskeys[authInfo.accessKeyId]; + var secretKey; + var bucketScope = null; + if (keyData && + typeof (keyData) === 'object') { + secretKey = keyData.secret; + bucketScope = keyData.scope || + null; + } else if (typeof (keyData) === 'string') { + secretKey = keyData; + } + if (!secretKey) { + log.warn({ + accessKeyId: authInfo.accessKeyId, + keyDataType: typeof (keyData), + hasSecret: keyData && + typeof (keyData) === 'object' ? + !!keyData.secret : 'n/a' + }, 'Access key data present but ' + + 'secret could not be extracted'); + cb(new errors.InvalidSignatureError( + 'Access key secret not found')); + return; + } + + var sigResult = verifySigV4Signature( + authInfo, secretKey, req, log); + if (sigResult.err) { + cb(sigResult.err); + return; + } + + log.debug({ + accessKeyId: authInfo.accessKeyId, + userUuid: userUuid + }, 'SigV4 verification successful'); + cb(null, buildPermanentResult( + user, authInfo.accessKeyId, + sigResult.signingKey, bucketScope)); + }); + }); +} + + +/** + * @brief Handle temporary credential verification via Redis + * + * Extracted from verifySigV4() to reduce indentation and isolate + * the Redis-based temporary credential verification path. + * + * Called after JWT session token validation succeeds. Looks up + * the temporary credential in Redis, checks expiration, derives + * the signing key, and returns the result. + * + * On Redis miss or parse failure, delegates to + * handleTemporaryCredential() (UFDS path). 
+ * + * @param {Object} authInfo - Parsed auth header + * @param {Object} tokenData - Validated JWT token data + * @param {string} sessionToken - Raw session token string + * @param {Object} req - HTTP request + * @param {Object} log - Bunyan logger + * @param {Object} redis - Redis client + * @param {Object|null} ufds - UFDS client (null when unavailable) + * @param {Function} cb - callback(err, result) + */ +function handleTemporaryCredentialRedis( + authInfo, tokenData, sessionToken, req, log, redis, ufds, cb) { + + var accessKeyLookupKey = sprintf('/accesskey/%s', + authInfo.accessKeyId); + redis.get(accessKeyLookupKey, function (redisErr, credentialData) { + if (redisErr) { + log.error({ + err: redisErr, + accessKeyId: authInfo.accessKeyId + }, 'sigv4.verify: Redis lookup failed for' + + ' temporary credential'); + cb(new errors.RedisError(redisErr)); + return; + } + + if (credentialData) { + var tempCredData; + try { + tempCredData = JSON.parse(credentialData); + } catch (_parseErr) { + log.debug({ + accessKeyId: authInfo.accessKeyId, + credentialDataType: typeof (credentialData), + credentialDataLength: credentialData ? + credentialData.length : 0, + credentialDataSample: credentialData ? + credentialData.substring(0, 100) + + '...' 
: 'null' + }, 'sigv4.verify: Redis contains non-JSON data,' + + ' probably UUID - trying UFDS'); + + if (ufds) { + handleTemporaryCredential(authInfo, + sessionToken, req, log, ufds, cb); + return; + } else { + cb(new errors.InvalidSignatureError( + 'Cannot verify temporary credentials')); + return; + } + } + + if (tempCredData.expiration && + new Date(tempCredData.expiration) < new Date()) { + log.info({ + accessKeyId: authInfo.accessKeyId, + expiration: tempCredData.expiration + }, 'sigv4.verify: Temporary credential expired'); + cb(new errors.InvalidSignatureError( + 'Credential expired')); + return; + } + + log.debug({ + accessKeyId: authInfo.accessKeyId, + userUuid: tempCredData.userUuid, + jwtUserUuid: tokenData.uuid, + expiration: tempCredData.expiration, + assumedRole: tempCredData.assumedRole ? + tempCredData.assumedRole.arn : null + }, 'sigv4.verify: Successfully verified temporary' + + ' credential from Redis with JWT'); + + var redisSigResult = calculateSignature( + tempCredData.secretAccessKey, + authInfo.dateStamp, + authInfo.region, + authInfo.service, + 'dummy'); + + cb(null, buildTemporaryResult({ + accessKeyId: tempCredData.accessKeyId, + secretAccessKey: tempCredData.secretAccessKey, + userUuid: tempCredData.userUuid, + assumedRole: tempCredData.assumedRole, + expiration: tempCredData.expiration, + signingKey: redisSigResult.signingKey, + bucketScope: (tempCredData.bucketScope != null) + ? 
tempCredData.bucketScope : null + })); + return; + } + + // Not found in Redis - try UFDS if available + if (ufds) { + log.info({ + accessKeyId: authInfo.accessKeyId + }, 'sigv4.verify: Temporary credential' + + ' not in Redis, trying UFDS'); + handleTemporaryCredential(authInfo, + sessionToken, req, log, ufds, cb); + return; + } + + log.error({ + accessKeyId: authInfo.accessKeyId + }, 'sigv4.verify: Temporary credential' + + ' not found in Redis and no UFDS available'); + cb(new errors.InvalidSignatureError( + 'Cannot verify temporary credentials')); + }); +} + + /** * @brief Verify AWS Signature Version 4 authentication * - * Validates AWS SigV4 signatures for both permanent and temporary - * credentials. Handles complete signature verification workflow - * including credential lookup, signature calculation, and validation. + * Dispatcher that routes to the appropriate credential handler + * based on whether a session token is present (temporary) or + * not (permanent). * * @param {Object} opts Verification options object containing: * @param {Object} opts.req HTTP request object with headers/query @@ -709,7 +1431,14 @@ function verifySigV4(opts, cb) { var req = opts.req; var log = opts.log; var redis = opts.redis; - var ufds = opts.ufds; // UFDS client for temporary credential lookup + /* + * UFDS client (or pool proxy with .search()). + * Used for read-through when a key is absent + * from Redis (replication lag). Null when UFDS + * is not configured — permanent key misses go + * to the negative cache, temp cred misses fail. 
+ */ + var ufds = opts.ufds || null; log.debug('sigv4.verify: entered'); @@ -807,20 +1536,18 @@ function verifySigV4(opts, cb) { hasRedis: !!redis }, 'sigv4.verify: ROUTING TO TEMPORARY CREDENTIAL HANDLER'); - // For temporary credentials, validate the JWT session token first var secretConfig = opts.secretConfig; if (!secretConfig || !secretConfig.secrets) { log.error({ accessKeyId: authInfo.accessKeyId }, 'sigv4.verify: No session secret config available for ' + - 'JWT validation'); + 'JWT validation'); cb(new errors.InvalidSignatureError( 'Cannot verify session token')); return; } - // DEBUG: Log session token metadata (never full token) log.debug({ accessKeyId: authInfo.accessKeyId, sessionTokenLength: sessionToken ? sessionToken.length : 0, @@ -834,12 +1561,9 @@ function verifySigV4(opts, cb) { sessionToken.split('.').length : 0 }, 'sigv4.verify: Session token validation metadata'); - // Verify the JWT session token - // (async callback approach for old JWT library) sessionTokenModule.verifySessionToken(sessionToken, - secretConfig, {}, - function (jwtErr, - tokenData) { + secretConfig, {}, + function (jwtErr, tokenData) { if (jwtErr) { log.error({ err: jwtErr, @@ -865,7 +1589,7 @@ function verifySigV4(opts, cb) { accessKeyId: authInfo.accessKeyId, tokenData: tokenData }, 'sigv4.verify: JWT session token validation failed' + - ' - no user UUID'); + ' - no user UUID'); cb(new errors.InvalidSignatureError( 'Invalid session token')); return; @@ -878,127 +1602,11 @@ function verifySigV4(opts, cb) { userUuid: tokenData ? 
tokenData.uuid : null }, 'sigv4.verify: JWT session token verification SUCCESS'); - // Now lookup the temporary credential in Redis - var accessKeyLookupKey = sprintf('/accesskey/%s', - authInfo.accessKeyId); - redis.get(accessKeyLookupKey, function (redisErr, credentialData) { - if (redisErr) { - log.error({ - err: redisErr, - accessKeyId: authInfo.accessKeyId - }, 'sigv4.verify: Redis lookup failed for' + - ' temporary credential'); - cb(new errors.RedisError(redisErr)); - return; - } - - if (credentialData) { - var tempCredData; - try { - tempCredData = JSON.parse(credentialData); - } catch (_parseErr) { - log.debug({ - accessKeyId: authInfo.accessKeyId, - credentialDataType: typeof (credentialData), - credentialDataLength: credentialData ? - credentialData.length : 0, - credentialDataSample: credentialData ? - credentialData.substring(0, 100) + - '...' : 'null' - }, 'sigv4.verify: Redis contains non-JSON data,' + - ' probably UUID - trying UFDS'); - - // If Redis contains just a UUID (old format), - // fall back to UFDS - if (ufds) { - handleTemporaryCredential(authInfo, - sessionToken, - req, - log, - ufds, - cb); - return; - } else { - cb(new errors.InvalidSignatureError( - 'Cannot verify temporary credentials')); - return; - } - } - - // Check expiration from credential data - if (tempCredData.expiration && - new Date(tempCredData.expiration) < new Date()) { - log.info({ - accessKeyId: authInfo.accessKeyId, - expiration: tempCredData.expiration - }, 'sigv4.verify: Temporary credential expired'); - cb(new errors.InvalidSignatureError - ('Credential expired')); - return; - } - - log.debug({ - accessKeyId: authInfo.accessKeyId, - userUuid: tempCredData.userUuid, - jwtUserUuid: tokenData.uuid, - expiration: tempCredData.expiration, - assumedRole: tempCredData.assumedRole ? 
- tempCredData.assumedRole.arn : null - }, 'sigv4.verify: Successfully verified temporary' + - ' credential from Redis with JWT'); - - // Derive signing key for chunk signature - // verification - var redisSigResult = calculateSignature( - tempCredData.secretAccessKey, - authInfo.dateStamp, - authInfo.region, - authInfo.service, - 'dummy'); - - // Return the credential info for signature verification - cb(null, { - accessKeyId: tempCredData.accessKeyId, - secretAccessKey: tempCredData.secretAccessKey, - userUuid: tempCredData.userUuid, - user: { uuid: tempCredData.userUuid }, - account: { uuid: tempCredData.userUuid }, - isTemporary: true, - isTemporaryCredential: true, - assumedRole: tempCredData.assumedRole, - principalUuid: tempCredData.userUuid, - expiration: tempCredData.expiration, - signingKey: redisSigResult.signingKey - }); - return; - } - - // Not found in Redis - try UFDS if available - if (ufds) { - log.info({ - accessKeyId: authInfo.accessKeyId - }, 'sigv4.verify: Temporary credential' + - ' not in Redis, trying UFDS'); - handleTemporaryCredential(authInfo, - sessionToken, - req, - log, - ufds, - cb); - return; - } - - log.error({ - accessKeyId: authInfo.accessKeyId - }, 'sigv4.verify: Temporary credential' + - ' not found in Redis and no UFDS available'); - cb(new errors.InvalidSignatureError( - 'Cannot verify temporary credentials')); - return; + handleTemporaryCredentialRedis( + authInfo, tokenData, sessionToken, + req, log, redis, ufds, cb); }); return; - }); // Close JWT verification callback - return; } // Detect temporary credentials (MSTS or MSAR) used @@ -1012,7 +1620,7 @@ function verifySigV4(opts, cb) { hasSessionToken: !!sessionToken, sessionTokenLength: sessionToken ? 
sessionToken.length : 0 }, 'SECURITY: Temporary access key used without' + - ' session token - BLOCKING'); + ' session token - BLOCKING'); cb(new errors.InvalidSignatureError( 'Temporary credentials' + @@ -1020,157 +1628,36 @@ function verifySigV4(opts, cb) { return; } - // Handle permanent credentials - look up from Redis - // Look up user by access key ID - var accessKeyLookupKey = sprintf('/accesskey/%s', - authInfo.accessKeyId); - redis.get(accessKeyLookupKey, function (err, userUuid) { - if (err) { - cb(new errors.RedisError(err)); - return; - } - - if (!userUuid) { - cb(new errors.InvalidSignatureError( - 'Invalid access key')); - return; - } - - // Get user's access keys - var userKey = sprintf('/uuid/%s', userUuid); - redis.get(userKey, function (userErr, userRes) { - if (userErr) { - cb(new errors.RedisError(userErr)); - return; - } - - if (!userRes) { - cb(new errors.InvalidSignatureError( - 'User not found')); - return; - } - - var user = JSON.parse(userRes); - if (!user.accesskeys || - !user.accesskeys[authInfo.accessKeyId]) { - cb(new errors.InvalidSignatureError( - 'Access key not found')); - return; - } - - var secretKey = user.accesskeys[authInfo.accessKeyId]; - var timestamp = req.headers['x-amz-date'] || - req.headers.date; - if (!timestamp) { - cb(new errors.InvalidSignatureError( - 'Missing timestamp')); - return; - } - - // According to AWS S3 documentation a 15 minutes - // threshold is used as a security mechanism to - // prevent replay attacks. - // https://docs.aws.amazon.com/AmazonS3/latest/API/\ - // sig-v4-authenticating-requests.html - var requestTime = - parseISO8601Basic(timestamp).getTime(); - var currentTime = Date.now(); - var timeDiff = Math.abs(currentTime - requestTime); - - // Y2038 detection: Log warning for post-Y2038 - // timestamps. JavaScript Date arithmetic works - // correctly on all platforms, so freshness check - // proceeds normally. 
- // TODO: Remove this check after migrating to 64-bit - // Node.js (Y2038 only affects 32-bit systems). - if (requestTime > Y2038_THRESHOLD_MS) { - log.warn({ - requestTimestamp: timestamp, - requestTimeMs: requestTime, - systemTimeMs: currentTime, - timeDiff: timeDiff - }, 'Y2038: Request timestamp beyond Y2038 ' + - 'threshold. Timestamp validation proceeds ' + - 'normally.'); - } - - if (timeDiff > 15 * 60 * 1000) { // 15 minutes - cb(new errors.InvalidSignatureError( - 'Request timestamp too old')); - return; - } - - // Build canonical request using original request data - // from query params. - // The original method and URL are passed as query - // parameters to /aws-verify - var originalMethod = req.query.method || req.method; - var originalUrl = req.query.url || req.url; - // URL decode the originalUrl if it comes from query - // params (fixes Cyberduck compatibility without - // affecting AWS CLI) - - var uri = originalUrl.split('?')[0]; - if (req.query.url) { - originalUrl = decodeURIComponent(originalUrl); - uri = decodeURIComponent(uri); - } - - var queryString = originalUrl.split('?')[1] || ''; - var payloadHash = req.headers['x-amz-content-sha256'] || - 'UNSIGNED-PAYLOAD'; - - var canonicalRequest = createCanonicalRequest( - originalMethod, uri, queryString, req.headers, - authInfo.signedHeaders, payloadHash); - - // Print hexdump of canonicalRequest - log.debug('canonicalRequest hexdump:\n'+ - utils.hexdump(canonicalRequest)); - - // Create string to sign - var credentialScope = sprintf('%s/%s/%s/aws4_request', - authInfo.dateStamp, authInfo.region, - authInfo.service); - var stringToSign = createStringToSign(timestamp, - credentialScope, canonicalRequest); - - log.debug('stringToSign hexdump:\n'+ - utils.hexdump(stringToSign)); - - // Calculate expected signature - var sigResult = calculateSignature( - secretKey, authInfo.dateStamp, authInfo.region, - authInfo.service, stringToSign); - - // Compare signatures - if (sigResult.signature !== 
authInfo.signature) { - log.debug({ - expected: sigResult.signature, - received: authInfo.signature, - stringToSign: stringToSign, - canonicalRequest: canonicalRequest - }, 'Signature mismatch'); - cb(new errors.InvalidSignatureError( - 'Signature mismatch')); - return; - } + /* + * Handle permanent credentials via Redis. + * + * Short-circuit with the negative cache so that + * repeated requests with the same invalid key ID + * do not hit Redis on every attempt. Only check + * when UFDS is unavailable — when UFDS is present, + * a key cached as missing in Redis may still be + * found via UFDS read-through (newly created key). + */ + if (!ufds && + isNegativelyCached(authInfo.accessKeyId)) { + log.debug({ + accessKeyId: authInfo.accessKeyId + }, 'sigv4: rejected by negative cache'); + cb(new errors.InvalidSignatureError( + 'Invalid access key')); + return; + } - log.debug({accessKeyId: authInfo.accessKeyId, - userUuid: userUuid}, - 'SigV4 verification successful'); - cb(null, { - user: user, - accessKeyId: authInfo.accessKeyId, - signingKey: sigResult.signingKey - }); - return; - }); - return; - }); + handlePermanentCredentialRedis( + authInfo, req, log, redis, ufds, cb); } module.exports = { parseAuthHeader: parseAuthHeader, - verifySigV4: verifySigV4 + verifySigV4: verifySigV4, + _handleTemporaryCredential: handleTemporaryCredential, + _handlePermanentCredentialRedis: handlePermanentCredentialRedis, + _handleTemporaryCredentialRedis: handleTemporaryCredentialRedis, + _buildPermanentResult: buildPermanentResult, + _buildTemporaryResult: buildTemporaryResult }; diff --git a/lib/server/sts.js b/lib/server/sts.js index e7fa76b..8b3922c 100644 --- a/lib/server/sts.js +++ b/lib/server/sts.js @@ -926,11 +926,23 @@ function assumeRole(req, res, next) { var callerLogin = caller.user && caller.user.login ? caller.user.login : caller.account.login; + /** + * @type {string} callerBucketScope - Bucket scope + * inherited from the parent access key. 
When the parent
+	 * key carries a scope JSON string, the temp credential
+	 * inherits it. When the parent key is unscoped, the
+	 * sentinel "none" is stored so that enforceBucketScope
+	 * can distinguish "parent was unscoped" from "scope was
+	 * lost in transit."
+	 */
+	var callerBucketScope = caller.bucketScope || 'none';
+
 	// SECURITY DEBUG: Log what caller info we received
 	log.debug({
 		callerHasRoleArn: !!caller.roleArn,
 		callerRoleArn: caller.roleArn,
 		callerLogin: callerLogin,
+		callerBucketScope: callerBucketScope,
 		callerKeys: Object.keys(caller)
 	}, 'SECURITY DEBUG: AssumeRole received caller info');
 
@@ -1228,6 +1240,8 @@ function assumeRole(req, res, next) {
 			policies: roleData.permissionPolicies || []
 		},
 		credentialType: 'temporary',
+		/* bucket scope from parent key (string|null) */
+		bucketScope: callerBucketScope,
 		created: now
 	};
 
@@ -1251,10 +1265,22 @@ function assumeRole(req, res, next) {
 		updated: now
 	};
 
+	/*
+	 * NOTE(review): callerBucketScope defaults to the 'none'
+	 * sentinel above, so this guard is always truthy and 'none'
+	 * is persisted to LDAP for unscoped parents. Confirm intended.
+ */ + if (callerBucketScope) { + ldapObject.accesskeyscope = callerBucketScope; + } + log.info({ accessKeyId: tempAccessKeyId, roleArnBeingStored: roleArn, ldapObjectAssumedrole: ldapObject.assumedrole, + ldapObjectAccesskeyscope: + ldapObject.accesskeyscope || null, + callerBucketScope: callerBucketScope, areTheyEqual: roleArn === ldapObject.assumedrole }, 'SECURITY DEBUG: Storing assumedrole in UFDS: ' + ldapObject.assumedrole); @@ -1473,7 +1499,7 @@ function createSessionTokenData(callerUuid, expiration) { * @returns {Object} LDAP object for UFDS */ function buildLdapObjectForSessionToken(params) { - return { + var obj = { objectclass: ['accesskey'], accesskeyid: params.accessKeyId, accesskeysecret: params.secretKey, @@ -1485,6 +1511,10 @@ function buildLdapObjectForSessionToken(params) { created: Date.now().toString(), updated: Date.now().toString() }; + if (params.bucketScope) { + obj.accesskeyscope = params.bucketScope; + } + return (obj); } /** @@ -1500,7 +1530,7 @@ function buildLdapObjectForSessionToken(params) { * @returns {Object} Access key data for Redis */ function buildAccessKeyDataForRedis(params) { - return { + var data = { type: 'accesskey', accessKeyId: params.accessKeyId, secretAccessKey: params.secretAccessKey, @@ -1511,6 +1541,10 @@ function buildAccessKeyDataForRedis(params) { created: Date.now().toString(), principalUuid: params.principalUuid }; + if (params.bucketScope) { + data.bucketScope = params.bucketScope; + } + return (data); } /** @@ -1585,6 +1619,7 @@ function getSessionToken(req, res, next) { var callerIdentity = extractCallerIdentity(caller); var callerUuid = callerIdentity.uuid; var callerLogin = callerIdentity.login; + var callerBucketScope = caller.bucketScope || 'none'; log.debug({ callerUuid: callerUuid, @@ -1689,7 +1724,8 @@ function getSessionToken(req, res, next) { secretKey: tempSecretKey, sessionToken: secureSessionToken, expiration: expiration, - principalUuid: callerUuid + principalUuid: callerUuid, + bucketScope: 
callerBucketScope }); log.debug({ @@ -1755,7 +1791,8 @@ function getSessionToken(req, res, next) { sessionToken: secureSessionToken, userUuid: callerUuid, expiration: expiration, - principalUuid: callerUuid + principalUuid: callerUuid, + bucketScope: callerBucketScope }); var redisKey = '/accesskey/' + tempAccessKeyId; diff --git a/package.json b/package.json index 06c7abc..816f18b 100644 --- a/package.json +++ b/package.json @@ -23,7 +23,7 @@ "once": "1.3.0", "redis": "0.10.0", "restify": "2.6.1", - "ufds": "1.9.1", + "ufds": "git+https://github.com/TritonDataCenter/node-ufds.git#f8ea6ad", "uuid": "3.4.0", "vasync": "1.4.3", "xtend": "2.1.2", diff --git a/sapi_manifests/mahi2/template b/sapi_manifests/mahi2/template index 08f7676..e831365 100644 --- a/sapi_manifests/mahi2/template +++ b/sapi_manifests/mahi2/template @@ -8,7 +8,7 @@ "tlsOptions": { "rejectUnauthorized": false }, - "interval": 10000 + "interval": 2000 }, "ufdsCfg": { "url": "{{UFDS_URL}}", diff --git a/test/accesskey-scope.test.js b/test/accesskey-scope.test.js new file mode 100644 index 0000000..2d805d2 --- /dev/null +++ b/test/accesskey-scope.test.js @@ -0,0 +1,2251 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ + +/* + * Copyright 2026 Edgecast Cloud LLC. 
+ */ + +/* + * Unit tests for per-bucket access key scope support across: + * - Replicator transforms (add, modify, delete scoped keys) + * - SigV4 verification (scoped permanent + temporary credentials) + * - STS helper functions (scope inheritance) + */ + +var transform = require('../lib/replicator/transforms/accesskey.js'); +var sigv4 = require('../lib/server/sigv4'); +var sts = require('../lib/server/sts.js'); +var SigV4Helper = require('./lib/sigv4-helper'); +var crypto = require('crypto'); +var bunyan = require('bunyan'); + +var redis = require('fakeredis'); +var REDIS; + +var nodeunit = require('nodeunit-plus'); +var test = nodeunit.test; + +var helper = new SigV4Helper({region: 'us-east-1', service: 's3'}); + +var SCOPE_JSON = JSON.stringify({ + version: 1, + permissions: [ + { bucket: 'app-data', level: 'readwrite' }, + { bucket: 'logs-*', level: 'read' } + ] +}); + +var USER_UUID = '550e8400-e29b-41d4-a716-446655440099'; +var SCOPED_KEY_ID = 'AKIASCOPED00000000001'; +var SCOPED_SECRET = 'scopedSecretKeyForTesting123456789abcdef0'; +var UNSCOPED_KEY_ID = 'AKIAUNSCOPED000000001'; +var UNSCOPED_SECRET = 'unscopedSecretKeyForTesting12345678abcde'; + +/* + * PART 1: Replicator transforms — scoped key add/modify/delete + */ + +test('setup - fresh redis', function (t) { + REDIS = redis.createClient(); + t.done(); +}); + +/* --- add: scoped permanent key --- */ + +test('add - scoped permanent key stores object format in Redis', + function (t) { + var entry = { + dn: 'changenumber=100, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + SCOPED_KEY_ID + + ', uuid=' + USER_UUID + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [USER_UUID], + _parent: 
['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }, + changenumber: '100' + }; + + var args = { + changes: entry.changes, + entry: entry, + log: this.log, + redis: REDIS + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + transform.add(args, function (err, res) { + t.ok(!err, 'add should not error'); + t.strictEqual(3, res.queue.length, + 'should have 3 redis operations'); + + res.exec(function (execErr) { + t.ok(!execErr, 'exec should not error'); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr, 'redis get should not error'); + var payload = JSON.parse(userRes); + var keyData = payload.accesskeys[SCOPED_KEY_ID]; + + // Scoped key must be stored as object, not string + t.equal(typeof (keyData), 'object', + 'scoped key should be stored as object'); + t.equal(keyData.secret, SCOPED_SECRET, + 'secret should be correct'); + t.equal(keyData.scope, SCOPE_JSON, + 'scope JSON should be preserved'); + + // Reverse lookup should be JSON with userUuid + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr, 'reverse lookup should not error'); + var lookupData = JSON.parse(lookupRes); + t.equal(lookupData.type, 'accesskey', + 'lookup should have type'); + t.equal(lookupData.userUuid, USER_UUID, + 'lookup should have correct userUuid'); + t.equal(lookupData.credentialType, 'permanent', + 'lookup should have correct credentialType'); + t.equal(lookupData.scope, SCOPE_JSON, + 'lookup should have scope'); + t.done(); + }); + }); + }); + }); +}); + +/* --- add: unscoped key alongside scoped key --- */ + +test('add - unscoped key uses unified object format', + function (t) { + var entry = { + dn: 'changenumber=101, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + UNSCOPED_KEY_ID + + ', uuid=' + USER_UUID + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [UNSCOPED_KEY_ID], + accesskeysecret: 
[UNSCOPED_SECRET], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }, + changenumber: '101' + }; + + var args = { + changes: entry.changes, + entry: entry, + log: this.log, + redis: REDIS + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + UNSCOPED_KEY_ID; + + transform.add(args, function (err, res) { + t.ok(!err, 'add should not error'); + res.exec(function (execErr) { + t.ok(!execErr, 'exec should not error'); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + + // Unscoped key should be object with scope: null + var unscopedData = + payload.accesskeys[UNSCOPED_KEY_ID]; + t.equal(typeof (unscopedData), 'object', + 'unscoped key should be stored as object'); + t.equal(unscopedData.secret, UNSCOPED_SECRET, + 'unscoped secret should be correct'); + t.equal(unscopedData.scope, null, + 'unscoped key should have scope: null'); + + // Scoped key should still be object + t.equal( + typeof (payload.accesskeys[SCOPED_KEY_ID]), + 'object', + 'scoped key should still be object'); + + // Reverse lookup should be JSON with scope: null + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr); + var lookupData = JSON.parse(lookupRes); + t.equal(lookupData.userUuid, USER_UUID, + 'unscoped reverse lookup userUuid'); + t.equal(lookupData.credentialType, + 'permanent', + 'unscoped reverse lookup type'); + t.equal(lookupData.scope, null, + 'unscoped reverse lookup scope null'); + t.done(); + }); + }); + }); + }); +}); + +/* --- modify: scope-only change --- */ + +test('modify - scope-only change updates Redis', function (t) { + var newScope = JSON.stringify({ + version: 1, + permissions: [ + { bucket: 'new-bucket', level: 'full' } + ] + }); + + var modEntry = { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: 
[newScope], + created: ['1761760834864'], + objectclass: ['accesskey'], + status: ['Active'], + updated: ['1761760874472'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }; + + var changes = [ + { + operation: 'replace', + modification: { + type: 'accesskeyscope', + vals: [newScope] + } + }, + { + operation: 'replace', + modification: { + type: 'updated', + vals: ['1761760874472'] + } + } + ]; + + var opts = { + log: this.log, + redis: REDIS, + changes: changes, + modEntry: modEntry, + entry: { changenumber: '999' } + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + transform.modify(opts, function (err, res) { + t.ok(!err, 'modify should not error'); + t.ok(res, 'modify should return batch'); + // Should NOT be a NOP (must have redis writes) + t.ok(res.queue.length > 1, + 'should have redis operations (not NOP)'); + + res.exec(function (execErr) { + t.ok(!execErr, 'exec should not error'); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + var keyData = payload.accesskeys[SCOPED_KEY_ID]; + t.equal(typeof (keyData), 'object', + 'scoped key should still be object'); + t.equal(keyData.scope, newScope, + 'scope should be updated to new value'); + t.equal(keyData.secret, SCOPED_SECRET, + 'secret should be preserved'); + + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr); + var lookupData = JSON.parse(lookupRes); + t.equal(lookupData.scope, newScope, + 'reverse lookup scope should be updated'); + t.done(); + }); + }); + }); + }); +}); + +/* --- modify: scope removal sets scope to null --- */ + +test('modify - scope removal stores object with scope null', + function (t) { + var modEntry = { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + created: ['1761760834864'], + objectclass: ['accesskey'], + status: ['Active'], + updated: ['1761760874472'], + _owner: [USER_UUID], + _parent: ['uuid=' + 
USER_UUID + ', ou=users, o=smartdc'] + }; + + var changes = [ + { + operation: 'delete', + modification: { + type: 'accesskeyscope', + vals: [] + } + }, + { + operation: 'replace', + modification: { + type: 'updated', + vals: ['1761760874472'] + } + } + ]; + + var opts = { + log: this.log, + redis: REDIS, + changes: changes, + modEntry: modEntry, + entry: { changenumber: '999' } + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + transform.modify(opts, function (err, res) { + t.ok(!err, 'modify should not error'); + t.ok(res.queue.length > 1, + 'should have redis operations (not NOP)'); + + res.exec(function (execErr) { + t.ok(!execErr, 'exec should not error'); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + var keyData = payload.accesskeys[SCOPED_KEY_ID]; + // After scope removal, key should be object + // with scope: null + t.equal(typeof (keyData), 'object', + 'key should remain object format'); + t.equal(keyData.secret, SCOPED_SECRET, + 'secret should be correct'); + t.equal(keyData.scope, null, + 'scope should be null after removal'); + + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr); + var lookupData = JSON.parse(lookupRes); + t.equal(lookupData.userUuid, USER_UUID, + 'reverse lookup userUuid correct'); + t.equal(lookupData.scope, null, + 'reverse lookup scope should be null'); + t.done(); + }); + }); + }); + }); +}); + +/* --- modify: status change on scoped key preserves scope --- */ + +test('modify - deactivate scoped key removes from Redis', + function (t) { + // First re-add the scoped key so we can deactivate it + var addEntry = { + dn: 'changenumber=102, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + SCOPED_KEY_ID + + ', uuid=' + USER_UUID + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [SCOPED_KEY_ID], + 
accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }, + changenumber: '102' + }; + + var log = this.log; + + transform.add({ + changes: addEntry.changes, + entry: addEntry, + log: log, + redis: REDIS + }, function (addErr, addRes) { + t.ok(!addErr, 'add should not error'); + addRes.exec(function () { + // Now deactivate + var modEntry = { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761760834864'], + objectclass: ['accesskey'], + status: ['Inactive'], + updated: ['1761760874472'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + + ', ou=users, o=smartdc'] + }; + + var changes = [ + { + operation: 'replace', + modification: { + type: 'status', + vals: ['Inactive'] + } + }, + { + operation: 'replace', + modification: { + type: 'updated', + vals: ['1761760874472'] + } + } + ]; + + transform.modify({ + log: log, + redis: REDIS, + changes: changes, + modEntry: modEntry, + entry: { changenumber: '999' } + }, function (modErr, modRes) { + t.ok(!modErr, 'modify should not error'); + t.strictEqual(3, modRes.queue.length, + 'should have 3 redis operations'); + + modRes.exec(function () { + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + t.equal( + payload.accesskeys[SCOPED_KEY_ID], + undefined, + 'scoped key should be removed'); + + REDIS.get(lookupKey, + function (lErr, lookupRes) { + t.ok(!lErr); + t.equal(lookupRes, null, + 'reverse lookup should be removed'); + t.done(); + }); + }); + }); + }); + }); + }); +}); + +/* --- modify: reactivate scoped key restores scope --- */ + +test('modify - reactivate scoped key restores object format', + function 
(t) { + var modEntry = { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761760834864'], + objectclass: ['accesskey'], + status: ['Active'], + updated: ['1761760874472'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }; + + var changes = [ + { + operation: 'replace', + modification: { + type: 'status', + vals: ['Active'] + } + }, + { + operation: 'replace', + modification: { + type: 'updated', + vals: ['1761760874472'] + } + } + ]; + + var opts = { + log: this.log, + redis: REDIS, + changes: changes, + modEntry: modEntry, + entry: { changenumber: '999' } + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + transform.modify(opts, function (err, res) { + t.ok(!err, 'modify should not error'); + t.strictEqual(3, res.queue.length, + 'should have 3 redis operations'); + + res.exec(function (execErr) { + t.ok(!execErr); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + var keyData = payload.accesskeys[SCOPED_KEY_ID]; + t.equal(typeof (keyData), 'object', + 'reactivated scoped key should be object'); + t.equal(keyData.secret, SCOPED_SECRET, + 'secret should be correct'); + t.equal(keyData.scope, SCOPE_JSON, + 'scope should be preserved on reactivation'); + + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr); + var lookupData = JSON.parse(lookupRes); + t.equal(lookupData.userUuid, USER_UUID, + 'reverse lookup UUID should be correct'); + t.equal(lookupData.scope, SCOPE_JSON, + 'reverse lookup scope should be correct'); + t.done(); + }); + }); + }); + }); +}); + +/* --- delete: scoped key --- */ + +test('delete - scoped key is fully cleaned up', function (t) { + var entry = { + dn: 'changenumber=103, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + SCOPED_KEY_ID + + ', uuid=' + USER_UUID + ', ou=users, o=smartdc', + changetype: 'delete', + 
objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + objectclass: ['accesskey'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + ', ou=users, o=smartdc'] + }, + changenumber: '103' + }; + + var args = { + changes: entry.changes, + entry: entry, + log: this.log, + redis: REDIS + }; + + var userKey = '/uuid/' + USER_UUID; + var lookupKey = '/accesskey/' + SCOPED_KEY_ID; + + transform.delete(args, function (err, res) { + t.ok(!err, 'delete should not error'); + t.strictEqual(3, res.queue.length, + 'should have 3 redis operations'); + + res.exec(function (execErr) { + t.ok(!execErr); + + REDIS.get(userKey, function (getErr, userRes) { + t.ok(!getErr); + var payload = JSON.parse(userRes); + t.equal(payload.accesskeys[SCOPED_KEY_ID], + undefined, + 'scoped key should be removed from user'); + + // Unscoped key should still be there + var unscopedData = + payload.accesskeys[UNSCOPED_KEY_ID]; + t.equal(typeof (unscopedData), 'object', + 'unscoped key should remain as object'); + t.equal(unscopedData.secret, UNSCOPED_SECRET, + 'unscoped key secret should remain'); + + REDIS.get(lookupKey, function (lErr, lookupRes) { + t.ok(!lErr); + t.equal(lookupRes, null, + 'reverse lookup should be removed'); + t.done(); + }); + }); + }); + }); +}); + +/* + * PART 2: SigV4 verification — scoped permanent credentials + */ + +/* + * Helper: set up user in Redis and run sigv4 verify. + * Supports both string (unscoped) and object (scoped) key formats. 
+ */ +function setupAndVerify(opts, t, callback) { + var user = opts.user; + var accessKeyId = opts.accessKeyId; + var secret = opts.secret; + var lookupVal = opts.lookupVal || user.uuid; + + var log = bunyan.createLogger({ + name: 'scope-sigv4-test', + level: 'fatal' + }); + + REDIS.set('/uuid/' + user.uuid, JSON.stringify(user), + function (err1) { + if (err1) { + return (callback(err1)); + } + return (REDIS.set('/accesskey/' + accessKeyId, lookupVal, + function (err2) { + if (err2) { + return (callback(err2)); + } + + var headers = helper.createHeaders({ + method: opts.method || 'GET', + path: opts.path || '/bucket/key', + accessKey: accessKeyId, + secret: secret + }); + + var payloadHash = crypto.createHash('sha256') + .update('', 'utf8').digest('hex'); + headers['x-amz-content-sha256'] = payloadHash; + + var req = { + method: opts.method || 'GET', + url: opts.path || '/bucket/key', + headers: headers, + query: {} + }; + + return (sigv4.verifySigV4({ + req: req, + log: log, + redis: REDIS + }, callback)); + })); + }); +} + +test('sigv4 - unscoped key returns null bucketScope', function (t) { + var user = { + uuid: 'sigv4-user-unscoped', + login: 'sigv4user', + accesskeys: { + 'AKIASIGV4UNSCOPED01': { + secret: 'secretForSigv4UnscopedTest1234567890', + scope: null + } + } + }; + + setupAndVerify({ + user: user, + accessKeyId: 'AKIASIGV4UNSCOPED01', + secret: 'secretForSigv4UnscopedTest1234567890', + method: 'GET', + path: '/test-bucket/object.txt' + }, t, function (err, result) { + t.ok(!err, 'should not error'); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, null, + 'unscoped key should have null bucketScope'); + t.done(); + }); +}); + +test('sigv4 - scoped permanent key returns bucketScope', function (t) { + var scopedSecret = 'scopedSigv4SecretForTesting123456789ab'; + + var user = { + uuid: 'sigv4-user-scoped', + login: 'sigv4scoped', + accesskeys: { + 'AKIASIGV4SCOPED001': { + secret: scopedSecret, + scope: SCOPE_JSON + } + } + }; + + 
// For scoped permanent keys, the reverse lookup is JSON + var lookupData = JSON.stringify({ + type: 'accesskey', + accessKeyId: 'AKIASIGV4SCOPED001', + userUuid: 'sigv4-user-scoped', + credentialType: 'permanent', + scope: SCOPE_JSON + }); + + setupAndVerify({ + user: user, + accessKeyId: 'AKIASIGV4SCOPED001', + secret: scopedSecret, + lookupVal: lookupData, + method: 'GET', + path: '/app-data/file.txt' + }, t, function (err, result) { + t.ok(!err, 'should not error: ' + (err ? err.message : '')); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, SCOPE_JSON, + 'scoped key should return bucketScope'); + t.equal(result.accessKeyId, 'AKIASIGV4SCOPED001', + 'should have correct access key ID'); + t.done(); + }); +}); + +test('sigv4 - corrupt reverse lookup JSON returns error', + function (t) { + var secret = 'corruptLookupTestSecret12345678901234'; + var user = { + uuid: 'sigv4-user-corrupt', + login: 'corrupt', + accesskeys: { + 'AKIACORRUPTLOOKUP01': secret + } + }; + + // Store corrupt JSON in the reverse lookup + setupAndVerify({ + user: user, + accessKeyId: 'AKIACORRUPTLOOKUP01', + secret: secret, + lookupVal: '{corrupt json!!!', + method: 'GET', + path: '/bucket/key' + }, t, function (err, result) { + t.ok(err, 'should error on corrupt reverse lookup'); + t.done(); + }); +}); + +/* + * PART 3: STS helpers — scope in builder functions + */ + +var buildLdapObj = sts.helpers.buildLdapObjectForSessionToken; +var buildRedisData = sts.helpers.buildAccessKeyDataForRedis; + +test('buildLdapObjectForSessionToken - includes scope when present', + function (t) { + var obj = buildLdapObj({ + accessKeyId: 'MSTSTEST00000000001', + secretKey: 'tempSecret123', + sessionToken: 'jwt-token-here', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-123', + bucketScope: SCOPE_JSON + }); + + t.equal(obj.accesskeyid, 'MSTSTEST00000000001'); + t.equal(obj.credentialtype, 'temporary'); + t.equal(obj.accesskeyscope, SCOPE_JSON, + 'should 
include accesskeyscope when scope is present'); + t.done(); +}); + +test('buildLdapObjectForSessionToken - omits scope when null', + function (t) { + var obj = buildLdapObj({ + accessKeyId: 'MSTSTEST00000000002', + secretKey: 'tempSecret456', + sessionToken: 'jwt-token-here', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-123', + bucketScope: null + }); + + t.equal(obj.accesskeyscope, undefined, + 'should NOT include accesskeyscope when scope is null'); + t.done(); +}); + +test('buildLdapObjectForSessionToken - omits scope when absent', + function (t) { + var obj = buildLdapObj({ + accessKeyId: 'MSTSTEST00000000003', + secretKey: 'tempSecret789', + sessionToken: 'jwt-token-here', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-123' + }); + + t.equal(obj.accesskeyscope, undefined, + 'should NOT include accesskeyscope when not provided'); + t.done(); +}); + +test('buildAccessKeyDataForRedis - includes scope when present', + function (t) { + var data = buildRedisData({ + accessKeyId: 'MSTSTEST00000000004', + secretAccessKey: 'tempSecret111', + sessionToken: 'jwt-token-here', + userUuid: 'user-uuid-456', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-456', + bucketScope: SCOPE_JSON + }); + + t.equal(data.accessKeyId, 'MSTSTEST00000000004'); + t.equal(data.credentialType, 'temporary'); + t.equal(data.bucketScope, SCOPE_JSON, + 'should include bucketScope when present'); + t.done(); +}); + +test('buildAccessKeyDataForRedis - omits scope when null', + function (t) { + var data = buildRedisData({ + accessKeyId: 'MSTSTEST00000000005', + secretAccessKey: 'tempSecret222', + sessionToken: 'jwt-token-here', + userUuid: 'user-uuid-456', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-456', + bucketScope: null + }); + + t.equal(data.bucketScope, undefined, + 'should NOT include bucketScope when null'); + t.done(); +}); + +test('buildAccessKeyDataForRedis - omits scope 
when absent', + function (t) { + var data = buildRedisData({ + accessKeyId: 'MSTSTEST00000000006', + secretAccessKey: 'tempSecret333', + sessionToken: 'jwt-token-here', + userUuid: 'user-uuid-456', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'user-uuid-456' + }); + + t.equal(data.bucketScope, undefined, + 'should NOT include bucketScope when not provided'); + t.done(); +}); + +/* + * PART 4: SigV4 — temporary credentials with scope + */ + +test('sigv4 - temp credential with bucketScope returns it', + function (t) { + /* + * Temporary credentials use JWT session tokens for auth. + * We can't easily mock the full JWT flow here, but we CAN + * verify that the Redis credential data structure includes + * bucketScope when present, by checking the builder output + * that gets stored in Redis. + */ + var data = buildRedisData({ + accessKeyId: 'MSARTEMP0000000001', + secretAccessKey: 'tempScopedSecret999', + sessionToken: 'jwt-for-scoped-temp', + userUuid: 'temp-user-uuid', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'temp-user-uuid', + bucketScope: SCOPE_JSON + }); + + t.equal(data.bucketScope, SCOPE_JSON, + 'temp credential data should carry bucketScope'); + t.equal(data.credentialType, 'temporary', + 'should be temporary credential'); + t.equal(data.accessKeyId, 'MSARTEMP0000000001', + 'access key ID should be correct'); + t.done(); +}); + +test('sigv4 - temp credential without scope has no bucketScope', + function (t) { + var data = buildRedisData({ + accessKeyId: 'MSARTEMP0000000002', + secretAccessKey: 'tempUnscopedSecret888', + sessionToken: 'jwt-for-unscoped-temp', + userUuid: 'temp-user-uuid', + expiration: new Date('2026-04-18T14:00:00Z'), + principalUuid: 'temp-user-uuid' + }); + + t.equal(data.bucketScope, undefined, + 'temp credential without scope should have no bucketScope'); + t.done(); +}); + +/* + * PART 5: UFDS fallback path — bucketScope in handleTemporaryCredential + * + * When a temporary credential is not 
found in Redis (e.g. after restart), + * sigv4 falls back to UFDS lookup via handleTemporaryCredential. These + * tests verify that the result includes bucketScope from the UFDS record. + */ + +var handleTempCred = sigv4._handleTemporaryCredential; + +/* + * Helper: build mocks and call handleTemporaryCredential. + * + * opts.accessKeyId — temp key ID + * opts.secret — temp secret key + * opts.sessionToken — session token string + * opts.principalUuid — user UUID stored on the UFDS credential + * opts.accesskeyscope — scope JSON string (or omit for unscoped) + * opts.assumedrole — optional role ARN + */ +function runUfdsFallback(opts, t, callback) { + var log = bunyan.createLogger({ + name: 'ufds-fallback-test', + level: 'fatal' + }); + + var user = { + uuid: opts.principalUuid, + login: 'ufds-fallback-user', + accesskeys: {} + }; + + /* Store principal user in Redis (required by the function) */ + REDIS.set('/uuid/' + opts.principalUuid, JSON.stringify(user), + function (redisErr) { + if (redisErr) { + return (callback(redisErr)); + } + + /* Generate valid SigV4 headers with the temp secret */ + var method = 'GET'; + var path = '/test-bucket/obj.txt'; + var headers = helper.createHeaders({ + method: method, + path: path, + accessKey: opts.accessKeyId, + secret: opts.secret + }); + + /* Parse auth header to build authInfo */ + var authInfo = sigv4.parseAuthHeader(headers.authorization); + + /* Build mock UFDS that returns the credential record */ + var credRecord = { + accesskeyid: opts.accessKeyId, + accesskeysecret: opts.secret, + sessiontoken: opts.sessionToken, + principaluuid: opts.principalUuid, + credentialtype: 'temporary', + expiration: new Date(Date.now() + 3600000).toISOString() + }; + if (opts.assumedrole) { + credRecord.assumedrole = opts.assumedrole; + } + if (opts.accesskeyscope) { + credRecord.accesskeyscope = opts.accesskeyscope; + } + var mockUfds = { + search: function (_base, _searchOpts, cb) { + cb(null, [credRecord]); + } + }; + + /* Build req 
object expected by handleTemporaryCredential */ + var req = { + method: method, + url: path, + headers: headers, + query: {}, + redis: REDIS + }; + + return (handleTempCred(authInfo, opts.sessionToken, + req, log, mockUfds, callback)); + }); +} + +test('UFDS fallback - scoped temp credential returns bucketScope', + function (t) { + runUfdsFallback({ + accessKeyId: 'UFDSSCOPED000000001', + secret: 'ufdsSecretScoped1234567890abcdef012345', + sessionToken: 'session-token-scoped-ufds', + principalUuid: 'ufds-scoped-user-uuid-001', + accesskeyscope: SCOPE_JSON, + assumedrole: 'arn:aws:iam::acct:role/TestRole' + }, t, function (err, result) { + t.ifError(err, 'should not error'); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, SCOPE_JSON, + 'UFDS fallback should return bucketScope from accesskeyscope'); + t.equal(result.isTemporaryCredential, true, + 'should be marked as temporary credential'); + t.equal(result.assumedRole, + 'arn:aws:iam::acct:role/TestRole', + 'should include assumedRole'); + t.done(); + }); +}); + +test('UFDS fallback - unscoped temp credential returns null bucketScope', + function (t) { + runUfdsFallback({ + accessKeyId: 'UFDSUNSCOPED0000001', + secret: 'ufdsSecretUnscoped234567890abcdef01234', + sessionToken: 'session-token-unscoped-ufds', + principalUuid: 'ufds-unscoped-user-uuid-01' + }, t, function (err, result) { + t.ifError(err, 'should not error'); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, null, + 'UFDS fallback without scope should return null bucketScope'); + t.done(); + }); +}); + +/* + * PART 6: Cross-path format compatibility + * + * Verifies that scope JSON survives a full round-trip via each + * write path — replicator transform.add() and direct cachePush + * format — when read back by sigv4.verifySigV4(). + * + * These tests guard against format divergence between the two + * paths, which currently write identical Redis structures but + * have no shared test. 
+ */ + +var RT_UUID_REPL = 'rt-replicator-test-uuid-001'; +var RT_KEY_REPL = 'AKIARTREPL000000001'; +var RT_SECRET_REPL = 'rtReplSecretForTesting1234567890abc'; + +var RT_UUID_CACHE = 'rt-cachepush-test-uuid-001'; +var RT_KEY_CACHE = 'AKIARTCACHE000000001'; +var RT_SECRET_CACHE = 'rtCacheSecretForTesting123456789abc'; + +var RT_SCOPE_JSON = JSON.stringify({ + version: 1, + permissions: [ + { bucket: 'cross-path-test', level: 'readwrite' } + ] +}); + +test('cross-path: replicator transform.add() scope survives sigv4 round-trip', + function (t) { + var log = bunyan.createLogger({ + name: 'rt-repl-test', + level: 'fatal' + }); + + var entry = { + changes: { + accesskeyid: [RT_KEY_REPL], + accesskeysecret: [RT_SECRET_REPL], + accesskeyscope: [RT_SCOPE_JSON], + status: ['Active'], + objectclass: ['accesskey'], + _owner: [RT_UUID_REPL], + _parent: ['uuid=' + RT_UUID_REPL + + ', ou=users, o=smartdc'] + } + }; + + /* Write via replicator transform */ + transform.add({ + changes: entry.changes, + entry: entry, + log: log, + redis: REDIS + }, function (addErr, batch) { + t.ok(!addErr, 'transform.add should not error'); + batch.exec(function (execErr) { + t.ok(!execErr, 'exec should not error'); + + /* Sign a request with the same key */ + var headers = helper.createHeaders({ + method: 'GET', + path: '/cross-path-test/obj.txt', + accessKey: RT_KEY_REPL, + secret: RT_SECRET_REPL + }); + headers['x-amz-content-sha256'] = + crypto.createHash('sha256') + .update('', 'utf8').digest('hex'); + + sigv4.verifySigV4({ + req: { + method: 'GET', + url: '/cross-path-test/obj.txt', + headers: headers, + query: {} + }, + log: log, + redis: REDIS + }, function (verErr, result) { + t.ok(!verErr, + 'sigv4 verify should not error: ' + + (verErr ? 
verErr.message : '')); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, RT_SCOPE_JSON, + 'scope must survive replicator → ' + + 'sigv4 round-trip unchanged'); + t.done(); + }); + }); + }); +}); + +test('cross-path: cachePush format scope survives sigv4 round-trip', + function (t) { + var log = bunyan.createLogger({ + name: 'rt-cache-test', + level: 'fatal' + }); + + var userKey = '/uuid/' + RT_UUID_CACHE; + var lookupKey = '/accesskey/' + RT_KEY_CACHE; + + /* Write in the same format as cachePushHandler */ + var userPayload = { + uuid: RT_UUID_CACHE, + login: 'cache-path-user', + accesskeys: {} + }; + userPayload.accesskeys[RT_KEY_CACHE] = { + secret: RT_SECRET_CACHE, + scope: RT_SCOPE_JSON + }; + + var lookupData = JSON.stringify({ + type: 'accesskey', + accessKeyId: RT_KEY_CACHE, + userUuid: RT_UUID_CACHE, + credentialType: 'permanent', + scope: RT_SCOPE_JSON + }); + + REDIS.set(userKey, JSON.stringify(userPayload), + function (setErr) { + t.ok(!setErr, 'redis set user should not error'); + + REDIS.set(lookupKey, lookupData, + function (set2Err) { + t.ok(!set2Err, 'redis set lookup should not error'); + + var headers = helper.createHeaders({ + method: 'GET', + path: '/cross-path-test/obj.txt', + accessKey: RT_KEY_CACHE, + secret: RT_SECRET_CACHE + }); + headers['x-amz-content-sha256'] = + crypto.createHash('sha256') + .update('', 'utf8').digest('hex'); + + sigv4.verifySigV4({ + req: { + method: 'GET', + url: '/cross-path-test/obj.txt', + headers: headers, + query: {} + }, + log: log, + redis: REDIS + }, function (verErr, result) { + t.ok(!verErr, + 'sigv4 verify should not error: ' + + (verErr ? 
verErr.message : '')); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, RT_SCOPE_JSON, + 'scope must survive cachePush format → ' + + 'sigv4 round-trip unchanged'); + t.done(); + }); + }); + }); +}); + +/* + * PART 7: UFDS read-through fallback for permanent keys + * + * Verifies that when a permanent key is absent from Redis, + * verifySigV4 falls through to the UFDS client and returns + * the correct bucketScope. Guards against the ufdsPool→ufds + * property name mismatch that previously left this path dead. + */ + +var UFDS_FB_UUID = 'ufds-fallback-perm-uuid-001'; +var UFDS_FB_KEY = 'AKIAUFDSFALLBACK0001'; +var UFDS_FB_SECRET = 'ufdsFallbackSecretForTest1234567890a'; + +test('UFDS read-through: scoped permanent key found via UFDS when not in Redis', + function (t) { + var log = bunyan.createLogger({ + name: 'ufds-fb-perm-test', + level: 'fatal' + }); + + /* + * Do NOT write the key to Redis. This simulates a key + * that exists in UFDS but has not yet replicated. + * Only write the user record so the result can resolve. 
+ */ + var userPayload = { + uuid: UFDS_FB_UUID, + login: 'ufds-fb-user', + accesskeys: {} + }; + + REDIS.set('/uuid/' + UFDS_FB_UUID, + JSON.stringify(userPayload), function (setErr) { + t.ok(!setErr, 'redis set user should not error'); + + /* Mock UFDS proxy with .search() */ + var mockUfds = { + search: function (_base, _opts, cb) { + cb(null, [ + { + accesskeyid: UFDS_FB_KEY, + accesskeysecret: UFDS_FB_SECRET, + status: 'Active', + accesskeyscope: RT_SCOPE_JSON, + _owner: UFDS_FB_UUID + } + ]); + } + }; + + var headers = helper.createHeaders({ + method: 'GET', + path: '/cross-path-test/obj.txt', + accessKey: UFDS_FB_KEY, + secret: UFDS_FB_SECRET + }); + headers['x-amz-content-sha256'] = + crypto.createHash('sha256') + .update('', 'utf8').digest('hex'); + + sigv4.verifySigV4({ + req: { + method: 'GET', + url: '/cross-path-test/obj.txt', + headers: headers, + query: {} + }, + log: log, + redis: REDIS, + ufds: mockUfds + }, function (verErr, result) { + t.ok(!verErr, + 'sigv4 verify should succeed via UFDS: ' + + (verErr ? verErr.message : '')); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, RT_SCOPE_JSON, + 'scope must survive UFDS read-through'); + t.equal(result.accessKeyId, UFDS_FB_KEY, + 'should have correct access key ID'); + t.done(); + }); + }); +}); + +/* + * PART 8: Shared Redis entry builder + * + * Verifies that the builder functions in + * redis-accesskey-format.js produce the exact structure + * expected by sigv4.js and redislib.js consumers. 
+ */ + +var akFormat = require('../lib/redis-accesskey-format'); +var REDIS_TOMBSTONE; + +test('buildPermanentKeyEntry - scoped key', function (t) { + var entry = akFormat.buildPermanentKeyEntry( + 'mySecret', SCOPE_JSON); + t.equal(entry.secret, 'mySecret', + 'secret should be preserved'); + t.equal(entry.scope, SCOPE_JSON, + 'scope should be preserved'); + t.equal(Object.keys(entry).length, 3, + 'should have exactly three keys (secret, scope, version)'); + t.done(); +}); + +test('buildPermanentKeyEntry - unscoped key', function (t) { + var entry = akFormat.buildPermanentKeyEntry( + 'mySecret', null); + t.equal(entry.scope, null, + 'scope should be null for unscoped key'); + t.done(); +}); + +test('buildPermanentKeyLookup - scoped key', function (t) { + var lookup = akFormat.buildPermanentKeyLookup( + 'AKIATEST', 'user-uuid', SCOPE_JSON); + t.equal(lookup.type, 'accesskey', + 'type should be accesskey'); + t.equal(lookup.accessKeyId, 'AKIATEST', + 'accessKeyId should be preserved'); + t.equal(lookup.userUuid, 'user-uuid', + 'userUuid should be preserved'); + t.equal(lookup.credentialType, 'permanent', + 'credentialType should be permanent'); + t.equal(lookup.scope, SCOPE_JSON, + 'scope should be preserved'); + t.done(); +}); + +test('buildPermanentKeyLookup - unscoped key', function (t) { + var lookup = akFormat.buildPermanentKeyLookup( + 'AKIATEST', 'user-uuid', null); + t.equal(lookup.scope, null, + 'scope should be null for unscoped key'); + t.equal(lookup.credentialType, 'permanent', + 'credentialType should be permanent'); + t.done(); +}); + + +/* + * PART 9: Extracted sigv4 functions — buildPermanentResult, + * handlePermanentCredentialRedis + * + * Verifies the extracted functions produce the same results + * as the monolithic verifySigV4 did before decomposition. 
+ */ + +test('buildPermanentResult - with scope', function (t) { + var result = sigv4._buildPermanentResult( + { uuid: USER_UUID }, + SCOPED_KEY_ID, + new Buffer('signingKey'), + SCOPE_JSON); + t.equal(result.user.uuid, USER_UUID, + 'user.uuid should match'); + t.equal(result.accessKeyId, SCOPED_KEY_ID, + 'accessKeyId should match'); + t.ok(Buffer.isBuffer(result.signingKey), + 'signingKey should be a buffer'); + t.equal(result.bucketScope, SCOPE_JSON, + 'bucketScope should be scope JSON'); + t.done(); +}); + +test('buildPermanentResult - null scope defaults to null', function (t) { + var result = sigv4._buildPermanentResult( + { uuid: USER_UUID }, + UNSCOPED_KEY_ID, + new Buffer('signingKey'), + null); + t.strictEqual(result.bucketScope, null, + 'bucketScope should be null'); + t.done(); +}); + +test('buildPermanentResult - undefined scope defaults to null', function (t) { + var result = sigv4._buildPermanentResult( + { uuid: USER_UUID }, + UNSCOPED_KEY_ID, + new Buffer('signingKey'), + undefined); + t.strictEqual(result.bucketScope, null, + 'bucketScope should be null for undefined'); + t.done(); +}); + +test('setup - fresh redis for PART 9', function (t) { + REDIS = redis.createClient('part9'); + t.done(); +}); + +test('exported functions exist', function (t) { + t.ok(typeof (sigv4._handlePermanentCredentialRedis) === 'function', + 'handlePermanentCredentialRedis should be exported'); + t.ok(typeof (sigv4._handleTemporaryCredentialRedis) === 'function', + 'handleTemporaryCredentialRedis should be exported'); + t.ok(typeof (sigv4._buildPermanentResult) === 'function', + 'buildPermanentResult should be exported'); + t.ok(typeof (sigv4._buildTemporaryResult) === 'function', + 'buildTemporaryResult should be exported'); + t.done(); +}); + +test('buildTemporaryResult - with scope and role', function (t) { + var result = sigv4._buildTemporaryResult({ + accessKeyId: 'MSTS00000000001', + secretAccessKey: 'tempSecret', + userUuid: USER_UUID, + assumedRole: { arn: 
'arn:aws:iam::acct:role/Test' }, + principalUuid: 'principal-uuid', + expiration: '2026-04-18T13:00:00.000Z', + signingKey: new Buffer('signingKey'), + bucketScope: SCOPE_JSON + }); + t.equal(result.accessKeyId, 'MSTS00000000001', + 'accessKeyId should match'); + t.equal(result.userUuid, USER_UUID, + 'userUuid should match'); + t.equal(result.user.uuid, USER_UUID, + 'user.uuid should match'); + t.equal(result.account.uuid, USER_UUID, + 'account.uuid should match'); + t.equal(result.isTemporaryCredential, true, + 'isTemporaryCredential should be true'); + t.equal(result.credentialType, 'temporary', + 'credentialType should be temporary'); + t.equal(result.assumedRole.arn, + 'arn:aws:iam::acct:role/Test', + 'assumedRole should be preserved'); + t.equal(result.principalUuid, 'principal-uuid', + 'principalUuid should match'); + t.equal(result.bucketScope, SCOPE_JSON, + 'bucketScope should be scope JSON'); + t.done(); +}); + +test('buildTemporaryResult - null scope defaults to null', + function (t) { + var result = sigv4._buildTemporaryResult({ + accessKeyId: 'MSTS00000000002', + userUuid: USER_UUID, + signingKey: new Buffer('signingKey') + }); + t.strictEqual(result.bucketScope, null, + 'bucketScope should default to null'); + t.strictEqual(result.assumedRole, null, + 'assumedRole should default to null'); + t.strictEqual(result.expiration, null, + 'expiration should default to null'); + t.equal(result.principalUuid, USER_UUID, + 'principalUuid should default to userUuid'); + t.done(); +}); + +/* + * Helper: set up user in Redis and call + * handlePermanentCredentialRedis with a properly signed request. 
+ */ +function setupAndVerifyExtracted(opts, t, callback) { + var user = opts.user; + var accessKeyId = opts.accessKeyId; + var secret = opts.secret; + var lookupVal = opts.lookupVal; + + var log = bunyan.createLogger({ + name: 'part9-test', + level: 'fatal' + }); + + REDIS.set('/uuid/' + user.uuid, JSON.stringify(user), + function (err1) { + if (err1) { + return (callback(err1)); + } + return (REDIS.set('/accesskey/' + accessKeyId, lookupVal, + function (err2) { + if (err2) { + return (callback(err2)); + } + + var headers = helper.createHeaders({ + method: opts.method || 'GET', + path: opts.path || '/bucket/key', + accessKey: accessKeyId, + secret: secret + }); + + var payloadHash = crypto.createHash('sha256') + .update('', 'utf8').digest('hex'); + headers['x-amz-content-sha256'] = payloadHash; + + var req = { + method: opts.method || 'GET', + url: opts.path || '/bucket/key', + headers: headers, + query: {} + }; + + var authInfo = sigv4.parseAuthHeader( + headers.authorization); + + return (sigv4._handlePermanentCredentialRedis( + authInfo, req, log, REDIS, null, callback)); + })); + }); +} + +test('handlePermanentCredentialRedis - scoped key returns bucketScope', + function (t) { + var P9_KEY_ID = 'AKIAP9SCOPED0000001'; + var P9_SECRET = 'p9scopedSecretKeyForTesting123456789abcde'; + var P9_UUID = 'p9-scoped-uuid-0000-0000-000000000001'; + + var user = { + uuid: P9_UUID, + login: 'p9scopeduser', + accesskeys: {} + }; + user.accesskeys[P9_KEY_ID] = + akFormat.buildPermanentKeyEntry(P9_SECRET, SCOPE_JSON); + + var lookupData = akFormat.buildPermanentKeyLookup( + P9_KEY_ID, P9_UUID, SCOPE_JSON); + + setupAndVerifyExtracted({ + user: user, + accessKeyId: P9_KEY_ID, + secret: P9_SECRET, + lookupVal: JSON.stringify(lookupData), + method: 'GET', + path: '/bucket/key' + }, t, function (err, result) { + t.ok(!err, 'should not error: ' + (err ? 
err.message : '')); + t.ok(result, 'should return result'); + t.equal(result.bucketScope, SCOPE_JSON, + 'scoped key should return scope JSON'); + t.equal(result.accessKeyId, P9_KEY_ID, + 'accessKeyId should match'); + t.ok(result.signingKey, + 'signingKey should be present'); + t.equal(result.user.uuid, P9_UUID, + 'user.uuid should match'); + t.done(); + }); +}); + +test('handlePermanentCredentialRedis - unscoped key returns null scope', + function (t) { + var P9U_KEY_ID = 'AKIAP9UNSCOPED00001'; + var P9U_SECRET = 'p9unscopedSecretKeyForTesting1234567890ab'; + var P9U_UUID = 'p9-unscoped-uuid-000-0000-000000000001'; + + var user = { + uuid: P9U_UUID, + login: 'p9unscopeduser', + accesskeys: {} + }; + user.accesskeys[P9U_KEY_ID] = + akFormat.buildPermanentKeyEntry(P9U_SECRET, null); + + var lookupData = akFormat.buildPermanentKeyLookup( + P9U_KEY_ID, P9U_UUID, null); + + setupAndVerifyExtracted({ + user: user, + accessKeyId: P9U_KEY_ID, + secret: P9U_SECRET, + lookupVal: JSON.stringify(lookupData), + method: 'GET', + path: '/bucket/key' + }, t, function (err, result) { + t.ok(!err, 'should not error: ' + (err ? 
err.message : '')); + t.ok(result, 'should return result'); + t.strictEqual(result.bucketScope, null, + 'unscoped key should return null bucketScope'); + t.done(); + }); +}); + +test('handlePermanentCredentialRedis - key not in Redis returns error', + function (t) { + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + var authInfo = { + accessKeyId: 'AKIANONEXISTENT00001', + dateStamp: '20260418', + region: 'us-east-1', + service: 's3', + signedHeaders: 'host;x-amz-date', + signature: 'dummy' + }; + var mockReq = { + headers: { + host: 'localhost', + 'x-amz-date': '20260418T120000Z' + }, + query: {} + }; + + sigv4._handlePermanentCredentialRedis( + authInfo, mockReq, log, REDIS, null, + function (err, result) { + t.ok(err, 'should return error for missing key'); + t.ok(!result, 'should not return result'); + t.ok(err.message.indexOf('Invalid access key') >= 0 || + err.restCode === 'InvalidSignature', + 'error should indicate invalid key'); + t.done(); + }); +}); + +test('handlePermanentCredentialRedis - legacy string format', + function (t) { + var LEGACY_KEY_ID = 'AKIALEGACY0000000001'; + var LEGACY_SECRET = 'legacySecretKeyForTesting1234567890abcde'; + var LEGACY_UUID = '660e8400-e29b-41d4-a716-446655440099'; + + var user = { + uuid: LEGACY_UUID, + login: 'legacyuser', + accesskeys: {} + }; + // Legacy format: bare string (no object wrapper) + user.accesskeys[LEGACY_KEY_ID] = LEGACY_SECRET; + + setupAndVerifyExtracted({ + user: user, + accessKeyId: LEGACY_KEY_ID, + secret: LEGACY_SECRET, + lookupVal: LEGACY_UUID, // Legacy: plain UUID string + method: 'GET', + path: '/bucket/key' + }, t, function (err, result) { + t.ok(!err, 'should not error: ' + (err ? 
err.message : '')); + t.ok(result, 'should return result'); + t.strictEqual(result.bucketScope, null, + 'legacy key should have null bucketScope'); + t.equal(result.accessKeyId, LEGACY_KEY_ID, + 'accessKeyId should match'); + t.done(); + }); +}); + + +/* + * PART 10: Revocation tombstones — durable scope-revoke + * + * Verifies that a revocation tombstone in Redis prevents + * the replicator from re-adding or modifying a revoked key. + */ + +test('setup - fresh redis for PART 10', function (t) { + REDIS_TOMBSTONE = redis.createClient('part10'); + t.done(); +}); + +test('revokedKeyPath produces correct path', function (t) { + var path = akFormat.revokedKeyPath('AKIATEST123'); + t.equal(path, '/revoked/AKIATEST123', + 'should produce /revoked/ prefix'); + t.done(); +}); + +test('buildRevocationTombstone includes revokedAt and userUuid', + function (t) { + var before = Date.now(); + var tombstone = akFormat.buildRevocationTombstone('user-uuid'); + var after = Date.now(); + t.equal(tombstone.userUuid, 'user-uuid', + 'userUuid should be preserved'); + t.ok(tombstone.revokedAt >= before && + tombstone.revokedAt <= after, + 'revokedAt should be current time'); + t.done(); +}); + +test('REVOKE_TTL_SECONDS is 24 hours', function (t) { + t.equal(akFormat.REVOKE_TTL_SECONDS, 86400, + 'TTL should be 86400 seconds (24 hours)'); + t.done(); +}); + +test('tombstone prevents replicator add()', function (t) { + var REVOKED_KEY_ID = 'AKIAREVOKED00000001'; + var REVOKED_SECRET = 'revokedSecretKeyForTesting12345678abcdef'; + var REVOKED_UUID = '770e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + // Write tombstone + var revokedKey = akFormat.revokedKeyPath(REVOKED_KEY_ID); + REDIS_TOMBSTONE.set(revokedKey, + JSON.stringify(akFormat.buildRevocationTombstone(REVOKED_UUID)), + function () { + + // Try to add the key via replicator + var entry = { + dn: 'changenumber=200, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + 
REVOKED_KEY_ID + + ', uuid=' + REVOKED_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [REVOKED_KEY_ID], + accesskeysecret: [REVOKED_SECRET], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [REVOKED_UUID], + _parent: ['uuid=' + REVOKED_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '200' + }; + + transform.add({ + changes: entry.changes, + entry: entry, + log: log, + redis: REDIS_TOMBSTONE + }, function (err, batch) { + t.ifError(err, 'add should not error'); + // Execute the batch and verify key was NOT added + batch.exec(function () { + var userKey = '/uuid/' + REVOKED_UUID; + REDIS_TOMBSTONE.get(userKey, function (_, val) { + if (val) { + var payload = JSON.parse(val); + t.ok(!payload.accesskeys || + !payload.accesskeys[REVOKED_KEY_ID], + 'revoked key should NOT be in Redis'); + } else { + t.ok(true, + 'user record absent (key not added)'); + } + t.done(); + }); + }); + }); + }); +}); + +test('no tombstone allows replicator add()', function (t) { + var NORMAL_KEY_ID = 'AKIANORMAL000000001'; + var NORMAL_SECRET = 'normalSecretKeyForTesting123456789abcdef'; + var NORMAL_UUID = '880e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + var entry = { + dn: 'changenumber=201, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + NORMAL_KEY_ID + + ', uuid=' + NORMAL_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [NORMAL_KEY_ID], + accesskeysecret: [NORMAL_SECRET], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [NORMAL_UUID], + _parent: ['uuid=' + NORMAL_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '201' + }; + + transform.add({ + 
changes: entry.changes, + entry: entry, + log: log, + redis: REDIS_TOMBSTONE + }, function (err, batch) { + t.ifError(err, 'add should not error'); + batch.exec(function () { + var userKey = '/uuid/' + NORMAL_UUID; + REDIS_TOMBSTONE.get(userKey, function (_, val) { + t.ok(val, 'user record should exist'); + var payload = JSON.parse(val); + t.ok(payload.accesskeys && + payload.accesskeys[NORMAL_KEY_ID], + 'key should be in Redis'); + t.done(); + }); + }); + }); +}); + +test('tombstone prevents replicator modify()', function (t) { + var MOD_KEY_ID = 'AKIAMODREVOKED00001'; + var MOD_SECRET = 'modRevokedSecretKey1234567890abcdefgh'; + var MOD_UUID = '990e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + // Pre-populate user record so modify has something to work with + var userPayload = { + uuid: MOD_UUID, + accesskeys: {} + }; + userPayload.accesskeys[MOD_KEY_ID] = + akFormat.buildPermanentKeyEntry(MOD_SECRET, null); + + var batch1 = REDIS_TOMBSTONE.multi(); + batch1.set('/uuid/' + MOD_UUID, JSON.stringify(userPayload)); + batch1.set('/accesskey/' + MOD_KEY_ID, + JSON.stringify(akFormat.buildPermanentKeyLookup( + MOD_KEY_ID, MOD_UUID, null))); + batch1.exec(function () { + + // Write tombstone + var revokedKey = akFormat.revokedKeyPath(MOD_KEY_ID); + REDIS_TOMBSTONE.set(revokedKey, + JSON.stringify(akFormat.buildRevocationTombstone(MOD_UUID)), + function () { + + // Try to modify the key via replicator + var modEntry = { + accesskeyid: [MOD_KEY_ID], + accesskeysecret: [MOD_SECRET], + _owner: [MOD_UUID], + credentialtype: ['permanent'], + objectclass: ['accesskey'], + accesskeyscope: [SCOPE_JSON] + }; + + var changes = [ + { + operation: 'add', + modification: { + type: 'accesskeyscope', + vals: [SCOPE_JSON] + } + } + ]; + + transform.modify({ + changes: changes, + modEntry: modEntry, + entry: { changenumber: '300' }, + log: log, + redis: REDIS_TOMBSTONE + }, function (err, modBatch) { + t.ifError(err, 'modify 
should not error'); + modBatch.exec(function () { + // Key should still have null scope (modify was skipped) + var userKey = '/uuid/' + MOD_UUID; + REDIS_TOMBSTONE.get(userKey, function (_, val) { + var payload = JSON.parse(val); + var keyData = payload.accesskeys[MOD_KEY_ID]; + t.equal(keyData.scope, null, + 'scope should NOT have been updated ' + + '(tombstone blocked modify)'); + t.done(); + }); + }); + }); + }); + }); +}); + + +/* + * PART 11: Write versioning — prevent stale replicator writes + * + * Verifies that the version field in Redis entries prevents + * stale replicator writes from overwriting newer cachePush data. + */ + +var REDIS_VERSION; + +test('setup - fresh redis for PART 11', function (t) { + REDIS_VERSION = redis.createClient('part11'); + t.done(); +}); + +test('buildPermanentKeyEntry includes version field', function (t) { + var entry = akFormat.buildPermanentKeyEntry( + 'mySecret', SCOPE_JSON, 42); + t.equal(entry.secret, 'mySecret', + 'secret should be preserved'); + t.equal(entry.scope, SCOPE_JSON, + 'scope should be preserved'); + t.equal(entry.version, 42, + 'version should be 42'); + t.done(); +}); + +test('buildPermanentKeyEntry defaults version to 0', function (t) { + var entry = akFormat.buildPermanentKeyEntry( + 'mySecret', null); + t.equal(entry.version, 0, + 'version should default to 0'); + t.done(); +}); + +test('buildPermanentKeyLookup includes version field', function (t) { + var lookup = akFormat.buildPermanentKeyLookup( + 'AKIATEST', 'user-uuid', SCOPE_JSON, 99); + t.equal(lookup.version, 99, + 'version should be 99'); + t.done(); +}); + +test('buildPermanentKeyLookup defaults version to 0', function (t) { + var lookup = akFormat.buildPermanentKeyLookup( + 'AKIATEST', 'user-uuid', null); + t.equal(lookup.version, 0, + 'version should default to 0'); + t.done(); +}); + +test('replicator skips write when existing version is newer', + function (t) { + var VER_KEY_ID = 'AKIAVERSION00000001'; + var VER_SECRET = 
'versionSecretKeyForTesting1234567890abcde'; + var VER_UUID = 'aa0e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + // Pre-populate with a high version (simulating cachePush) + var userPayload = { + uuid: VER_UUID, + accesskeys: {} + }; + userPayload.accesskeys[VER_KEY_ID] = + akFormat.buildPermanentKeyEntry(VER_SECRET, SCOPE_JSON, + Date.now()); // cachePush version (~1.7 trillion) + + var batch0 = REDIS_VERSION.multi(); + batch0.set('/uuid/' + VER_UUID, JSON.stringify(userPayload)); + batch0.set('/accesskey/' + VER_KEY_ID, + JSON.stringify(akFormat.buildPermanentKeyLookup( + VER_KEY_ID, VER_UUID, SCOPE_JSON, Date.now()))); + batch0.exec(function () { + + // Replicator tries to add with low changenumber + var entry = { + dn: 'changenumber=500, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + VER_KEY_ID + + ', uuid=' + VER_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [VER_KEY_ID], + accesskeysecret: [VER_SECRET], + accesskeyscope: [null], // replicator has null scope + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [VER_UUID], + _parent: ['uuid=' + VER_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '500' + }; + + transform.add({ + changes: entry.changes, + entry: entry, + log: log, + redis: REDIS_VERSION + }, function (err, replicatorBatch) { + t.ifError(err, 'add should not error'); + replicatorBatch.exec(function () { + // Verify the original scope (from cachePush) survives + REDIS_VERSION.get('/uuid/' + VER_UUID, + function (_, val) { + var payload = JSON.parse(val); + var keyData = payload.accesskeys[VER_KEY_ID]; + t.equal(keyData.scope, SCOPE_JSON, + 'scope from cachePush should survive' + + ' (replicator write skipped)'); + t.ok(keyData.version > 500, + 'version should still be cachePush value'); + 
t.done(); + }); + }); + }); + }); +}); + +test('replicator overwrites when existing version is older', + function (t) { + var OLD_KEY_ID = 'AKIAOLDVERSION00001'; + var OLD_SECRET = 'oldVersionSecretKey1234567890abcdefghijk'; + var OLD_UUID = 'bb0e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + // Pre-populate with a low version + var userPayload = { + uuid: OLD_UUID, + accesskeys: {} + }; + userPayload.accesskeys[OLD_KEY_ID] = + akFormat.buildPermanentKeyEntry(OLD_SECRET, null, 100); + + var batch0 = REDIS_VERSION.multi(); + batch0.set('/uuid/' + OLD_UUID, JSON.stringify(userPayload)); + batch0.set('/accesskey/' + OLD_KEY_ID, + JSON.stringify(akFormat.buildPermanentKeyLookup( + OLD_KEY_ID, OLD_UUID, null, 100))); + batch0.exec(function () { + + // Replicator writes with higher changenumber + var entry = { + dn: 'changenumber=200, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + OLD_KEY_ID + + ', uuid=' + OLD_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [OLD_KEY_ID], + accesskeysecret: [OLD_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [OLD_UUID], + _parent: ['uuid=' + OLD_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '200' + }; + + transform.add({ + changes: entry.changes, + entry: entry, + log: log, + redis: REDIS_VERSION + }, function (err, replicatorBatch) { + t.ifError(err, 'add should not error'); + replicatorBatch.exec(function () { + REDIS_VERSION.get('/uuid/' + OLD_UUID, + function (_, val) { + var payload = JSON.parse(val); + var keyData = payload.accesskeys[OLD_KEY_ID]; + t.equal(keyData.scope, SCOPE_JSON, + 'scope should be updated by replicator'); + t.equal(keyData.version, 200, + 'version should be replicator changenumber'); + t.done(); + }); + }); + 
}); + }); +}); + +test('replicator writes when no existing version (backward compat)', + function (t) { + var NEW_KEY_ID = 'AKIANOVERSION000001'; + var NEW_SECRET = 'noVersionSecretKey12345678901234567890ab'; + var NEW_UUID = 'cc0e8400-e29b-41d4-a716-446655440099'; + + var log = bunyan.createLogger({name: 'test', level: 'fatal'}); + + // Pre-populate with old format (no version field) + var userPayload = { + uuid: NEW_UUID, + accesskeys: {} + }; + // Old format: bare string (version absent = 0) + userPayload.accesskeys[NEW_KEY_ID] = NEW_SECRET; + + REDIS_VERSION.set('/uuid/' + NEW_UUID, + JSON.stringify(userPayload), function () { + + var entry = { + dn: 'changenumber=50, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + NEW_KEY_ID + + ', uuid=' + NEW_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [NEW_KEY_ID], + accesskeysecret: [NEW_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [NEW_UUID], + _parent: ['uuid=' + NEW_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '50' + }; + + transform.add({ + changes: entry.changes, + entry: entry, + log: log, + redis: REDIS_VERSION + }, function (err, replicatorBatch) { + t.ifError(err, 'add should not error'); + replicatorBatch.exec(function () { + REDIS_VERSION.get('/uuid/' + NEW_UUID, + function (_, val) { + var payload = JSON.parse(val); + var keyData = payload.accesskeys[NEW_KEY_ID]; + t.ok(typeof (keyData) === 'object', + 'key should be upgraded to object format'); + t.equal(keyData.scope, SCOPE_JSON, + 'scope should be written'); + t.equal(keyData.version, 50, + 'version should be changenumber'); + t.done(); + }); + }); + }); + }); +}); + +/* + * Regression test for the vals[i] -> vals[0] fix in + * modify(). 
The original code indexed into vals with + * the outer loop variable (i), which read the wrong + * element when multiple changes were present. The fix + * uses vals[0] since each change entry has exactly one + * value. + */ +test('modify - vals[0] regression: status extracted ' + + 'correctly with multiple changes', + function (t) { + var log = this.log; + + /* Pre-populate a key in Redis */ + var addEntry = { + dn: 'changenumber=200, cn=changelog', + controls: [], + targetdn: 'accesskeyid=' + SCOPED_KEY_ID + + ', uuid=' + USER_UUID + + ', ou=users, o=smartdc', + changetype: 'add', + objectclass: 'changeLogEntry', + changetime: '2026-04-18T12:00:00.000Z', + changes: { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + status: ['Active'], + updated: ['1761762138761'], + objectclass: ['accesskey'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + + ', ou=users, o=smartdc'] + }, + changenumber: '200' + }; + + transform.add({ + changes: addEntry.changes, + entry: addEntry, + log: log, + redis: REDIS + }, function (addErr, addRes) { + t.ok(!addErr, 'add should not error'); + addRes.exec(function () { + /* + * Send a modify with status as the SECOND + * change (index 1). With the old vals[i] + * bug, the code would read vals[1] which + * is undefined, and status would be null + * instead of 'Inactive'. 
+ */ + var modEntry = { + accesskeyid: [SCOPED_KEY_ID], + accesskeysecret: [SCOPED_SECRET], + accesskeyscope: [SCOPE_JSON], + created: ['1761762138761'], + objectclass: ['accesskey'], + status: ['Inactive'], + updated: ['1761762200000'], + _owner: [USER_UUID], + _parent: ['uuid=' + USER_UUID + + ', ou=users, o=smartdc'] + }; + + var changes = [ + { + operation: 'replace', + modification: { + type: 'updated', + vals: ['1761762200000'] + } + }, + { + operation: 'replace', + modification: { + type: 'status', + vals: ['Inactive'] + } + } + ]; + + transform.modify({ + log: log, + redis: REDIS, + changes: changes, + modEntry: modEntry, + entry: { changenumber: '201' } + }, function (modErr, modRes) { + t.ok(!modErr, 'modify should not error'); + /* + * Status is Inactive, so the key should + * be DELETED from Redis (3 ops: set user, + * del reverse lookup, multi). + */ + t.ok(modRes.queue.length >= 2, + 'should delete key from Redis'); + t.done(); + }); + }); + }); +});