Skip to content

Commit 3b7a912

Browse files
Merge pull request #8504 from tangledbytes/utkarsh/backport/5.17.1
[Backport 5.17] Backport of DB and Scale fixes to 5.17.1
2 parents 4777914 + 6ad9528 commit 3b7a912

File tree

12 files changed

+291
-39
lines changed

12 files changed

+291
-39
lines changed

config.js

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,22 @@ config.S3_RESTORE_REQUEST_MAX_DAYS = 30;
198198
*/
199199
config.S3_RESTORE_REQUEST_MAX_DAYS_BEHAVIOUR = 'TRUNCATE';
200200

201+
/**
202+
* S3_MAX_KEY_LENGTH controls the maximum key length that will be accepted
203+
* by NooBaa endpoints.
204+
*
205+
* This value is 1024 bytes for S3 but the default is `Infinity`
206+
*/
207+
config.S3_MAX_KEY_LENGTH = Infinity;
208+
209+
/**
210+
* S3_MAX_BUCKET_NAME_LENGTH controls the maximum bucket name length that
211+
* will be accepted by NooBaa endpoints.
212+
*
213+
* This value is 63 bytes for S3 but the default is `Infinity`
214+
*/
215+
config.S3_MAX_BUCKET_NAME_LENGTH = Infinity;
216+
201217
/////////////////////
202218
// SECRETS CONFIG //
203219
/////////////////////

src/endpoint/s3/ops/s3_get_bucket_lifecycle.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ async function get_bucket_lifecycle(req) {
4747
current_rule.Expiration = {
4848
Days: rule.expiration.days,
4949
Date: rule.expiration.date ? new Date(rule.expiration.date).toISOString() : undefined,
50+
ExpiredObjectDeleteMarker: rule.expiration.expired_object_delete_marker,
5051
};
5152
_.omitBy(current_rule.Expiration, _.isUndefined);
5253
}

src/endpoint/s3/ops/s3_put_bucket_lifecycle.js

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,13 @@ function parse_filter(filter) {
4646
return current_rule_filter;
4747
}
4848

49+
function reject_empty_field(field) {
50+
if (_.isEmpty(field)) {
51+
dbg.error('Invalid field - empty', field);
52+
throw new S3Error(S3Error.MalformedXML);
53+
}
54+
}
55+
4956
// parse lifecycle rule expiration
5057
function parse_expiration(expiration) {
5158
const output_expiration = {};
@@ -111,12 +118,14 @@ async function put_bucket_lifecycle(req) {
111118

112119
if (rule.Expiration?.length === 1) {
113120
current_rule.expiration = parse_expiration(rule.Expiration[0]);
121+
reject_empty_field(current_rule.expiration);
114122
}
115123

116124
if (rule.AbortIncompleteMultipartUpload?.length === 1) {
117125
current_rule.abort_incomplete_multipart_upload = _.omitBy({
118126
days_after_initiation: parse_lifecycle_field(rule.AbortIncompleteMultipartUpload[0].DaysAfterInitiation),
119127
}, _.isUndefined);
128+
reject_empty_field(current_rule.abort_incomplete_multipart_upload);
120129
}
121130

122131
if (rule.Transition?.length === 1) {
@@ -125,13 +134,15 @@ async function put_bucket_lifecycle(req) {
125134
date: parse_lifecycle_field(rule.Transition[0].Date, s => new Date(s)),
126135
days: parse_lifecycle_field(rule.Transition[0].Days),
127136
}, _.isUndefined);
137+
reject_empty_field(current_rule.transition);
128138
}
129139

130140
if (rule.NoncurrentVersionExpiration?.length === 1) {
131141
current_rule.noncurrent_version_expiration = _.omitBy({
132142
noncurrent_days: parse_lifecycle_field(rule.NoncurrentVersionExpiration[0].NoncurrentDays),
133143
newer_noncurrent_versions: parse_lifecycle_field(rule.NoncurrentVersionExpiration[0].NewerNoncurrentVersions),
134144
}, _.isUndefined);
145+
reject_empty_field(current_rule.noncurrent_version_expiration);
135146
}
136147

137148
if (rule.NoncurrentVersionTransition?.length === 1) {
@@ -140,6 +151,7 @@ async function put_bucket_lifecycle(req) {
140151
noncurrent_days: parse_lifecycle_field(rule.NoncurrentVersionTransition[0].NoncurrentDays),
141152
newer_noncurrent_versions: parse_lifecycle_field(rule.NoncurrentVersionTransition[0].NewerNoncurrentVersions),
142153
}, _.isUndefined);
154+
reject_empty_field(current_rule.noncurrent_version_transition);
143155
}
144156

145157
return current_rule;

src/endpoint/s3/s3_errors.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,8 @@ S3Error.InvalidURI = Object.freeze({
249249
message: 'Couldn\'t parse the specified URI.',
250250
http_code: 400,
251251
});
252-
S3Error.KeyTooLong = Object.freeze({
253-
code: 'KeyTooLong',
252+
S3Error.KeyTooLongError = Object.freeze({
253+
code: 'KeyTooLongError',
254254
message: 'Your key is too long.',
255255
http_code: 400,
256256
});

src/endpoint/s3/s3_rest.js

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -340,6 +340,14 @@ function get_bucket_and_key(req) {
340340
key = suffix;
341341
}
342342
}
343+
344+
if (key?.length > config.S3_MAX_KEY_LENGTH) {
345+
throw new S3Error(S3Error.KeyTooLongError);
346+
}
347+
if (bucket?.length > config.S3_MAX_BUCKET_NAME_LENGTH) {
348+
throw new S3Error(S3Error.InvalidBucketName);
349+
}
350+
343351
return {
344352
bucket,
345353
// decode and replace hadoop _$folder$ in key

src/sdk/namespace_fs.js

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -963,6 +963,7 @@ class NamespaceFS {
963963
dbg.warn(`NamespaceFS.read_object_md: retrying retries=${retries} file_path=${file_path}`, err);
964964
retries -= 1;
965965
if (retries <= 0 || !native_fs_utils.should_retry_link_unlink(is_gpfs, err)) throw err;
966+
await P.delay(get_random_delay(config.NSFS_RANDOM_DELAY_BASE, 0, 50));
966967
}
967968
}
968969
this._throw_if_delete_marker(stat, params);
@@ -1037,6 +1038,7 @@ class NamespaceFS {
10371038
{bucket_path: this.bucket_path, object_name: params.key}, err);
10381039
throw err;
10391040
}
1041+
await P.delay(get_random_delay(config.NSFS_RANDOM_DELAY_BASE, 0, 50));
10401042
}
10411043
}
10421044
this._throw_if_delete_marker(stat, params);
@@ -2847,6 +2849,13 @@ class NamespaceFS {
28472849
const deleted_latest = file_path === latest_version_path;
28482850
if (deleted_latest) {
28492851
gpfs_options = await this._open_files_gpfs(fs_context, file_path, undefined, undefined, undefined, undefined, true);
2852+
if (gpfs_options) {
2853+
const src_stat = await gpfs_options.delete_version.src_file.stat(fs_context);
2854+
if (this._is_mismatch_version_id(src_stat, version_id)) {
2855+
dbg.warn('NamespaceFS._delete_single_object_versioned mismatch version_id', file_path, version_id, this._get_version_id_by_xattr(src_stat));
2856+
throw error_utils.new_error_code('MISMATCH_VERSION', 'file version does not match the version we asked for');
2857+
}
2858+
}
28502859
const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
28512860
await native_fs_utils.safe_unlink(fs_context, file_path, version_info,
28522861
gpfs_options?.delete_version, bucket_tmp_dir_path);
@@ -3007,15 +3016,17 @@ class NamespaceFS {
30073016
let latest_ver_info;
30083017
for (;;) {
30093018
try {
3010-
// get latest version_id if exists
3019+
// TODO get latest version from file in POSIX like in GPFS path
30113020
latest_ver_info = await this._get_version_info(fs_context, latest_ver_path);
3012-
const versioned_path = latest_ver_info && this._get_version_path(params.key, latest_ver_info.version_id_str);
3013-
const versioned_info = latest_ver_info && await this._get_version_info(fs_context, versioned_path);
3014-
3015-
dbg.log1('Namespace_fs._delete_latest_version:', latest_ver_info, versioned_path, versioned_info);
3021+
dbg.log1('Namespace_fs._delete_latest_version:', latest_ver_info);
30163022
if (latest_ver_info) {
3023+
if (is_gpfs) {
30173024
gpfs_options = await this._open_files_gpfs(fs_context, latest_ver_path, undefined, undefined, undefined,
3018-
undefined, true, versioned_info);
3025+
undefined, true);
3026+
const latest_fd = gpfs_options?.move_to_dst?.dst_file;
3027+
latest_ver_info = latest_fd && await this._get_version_info(fs_context, undefined, latest_fd);
3028+
}
3029+
const versioned_path = latest_ver_info && this._get_version_path(params.key, latest_ver_info.version_id_str);
30193030

30203031
const suspended_and_latest_is_not_null = this._is_versioning_suspended() &&
30213032
latest_ver_info.version_id_str !== NULL_VERSION_ID;
@@ -3172,7 +3183,7 @@ class NamespaceFS {
31723183
let dir_file;
31733184
let versioned_file;
31743185
try {
3175-
// open /versions/key_ver file if exists
3186+
// open /versions/key_ver file if exists. TODO is versioned_file needed
31763187
versioned_file = versioned_info && await native_fs_utils.open_file(fs_context, this.bucket_path, versioned_info.path, 'r');
31773188

31783189
// open files for deletion flow

src/sdk/nsfs_glacier_backend/tapecloud.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ class TapeCloudUtils {
5151
throw new Error('process exited with non-zero exit code:', errcode);
5252
}
5353

54-
reader = new NewlineReader(fs_context, tmp);
54+
reader = new NewlineReader(fs_context, tmp, { skip_overflow_lines: true });
5555
await reader.forEach(async line => {
5656
const failure_case = line.startsWith("Fail");
5757
const success_case = line.startsWith("Success");

0 commit comments

Comments
 (0)