Skip to content

Commit 33e6fe0

Browse files
committed
Merge branch 'trunk' into saxenapranav/HADOOP-17912
2 parents 62aa01e + 93a3c6e commit 33e6fe0

File tree

31 files changed

+1094
-191
lines changed

31 files changed

+1094
-191
lines changed

LICENSE-binary

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -257,36 +257,36 @@ io.grpc:grpc-netty:1.26.0
257257
io.grpc:grpc-protobuf:1.26.0
258258
io.grpc:grpc-protobuf-lite:1.26.0
259259
io.grpc:grpc-stub:1.26.0
260-
io.netty:netty-all:4.1.94.Final
261-
io.netty:netty-buffer:4.1.94.Final
262-
io.netty:netty-codec:4.1.94.Final
263-
io.netty:netty-codec-dns:4.1.94.Final
264-
io.netty:netty-codec-haproxy:4.1.94.Final
265-
io.netty:netty-codec-http:4.1.94.Final
266-
io.netty:netty-codec-http2:4.1.94.Final
267-
io.netty:netty-codec-memcache:4.1.94.Final
268-
io.netty:netty-codec-mqtt:4.1.94.Final
269-
io.netty:netty-codec-redis:4.1.94.Final
270-
io.netty:netty-codec-smtp:4.1.94.Final
271-
io.netty:netty-codec-socks:4.1.94.Final
272-
io.netty:netty-codec-stomp:4.1.94.Final
273-
io.netty:netty-codec-xml:4.1.94.Final
274-
io.netty:netty-common:4.1.94.Final
275-
io.netty:netty-handler:4.1.94.Final
276-
io.netty:netty-handler-proxy:4.1.94.Final
277-
io.netty:netty-resolver:4.1.94.Final
278-
io.netty:netty-resolver-dns:4.1.94.Final
279-
io.netty:netty-transport:4.1.94.Final
280-
io.netty:netty-transport-rxtx:4.1.94.Final
281-
io.netty:netty-transport-sctp:4.1.94.Final
282-
io.netty:netty-transport-udt:4.1.94.Final
283-
io.netty:netty-transport-classes-epoll:4.1.94.Final
284-
io.netty:netty-transport-native-unix-common:4.1.94.Final
285-
io.netty:netty-transport-classes-kqueue:4.1.94.Final
286-
io.netty:netty-resolver-dns-classes-macos:4.1.94.Final
287-
io.netty:netty-transport-native-epoll:4.1.94.Final
288-
io.netty:netty-transport-native-kqueue:4.1.94.Final
289-
io.netty:netty-resolver-dns-native-macos:4.1.94.Final
260+
io.netty:netty-all:4.1.100.Final
261+
io.netty:netty-buffer:4.1.100.Final
262+
io.netty:netty-codec:4.1.100.Final
263+
io.netty:netty-codec-dns:4.1.100.Final
264+
io.netty:netty-codec-haproxy:4.1.100.Final
265+
io.netty:netty-codec-http:4.1.100.Final
266+
io.netty:netty-codec-http2:4.1.100.Final
267+
io.netty:netty-codec-memcache:4.1.100.Final
268+
io.netty:netty-codec-mqtt:4.1.100.Final
269+
io.netty:netty-codec-redis:4.1.100.Final
270+
io.netty:netty-codec-smtp:4.1.100.Final
271+
io.netty:netty-codec-socks:4.1.100.Final
272+
io.netty:netty-codec-stomp:4.1.100.Final
273+
io.netty:netty-codec-xml:4.1.100.Final
274+
io.netty:netty-common:4.1.100.Final
275+
io.netty:netty-handler:4.1.100.Final
276+
io.netty:netty-handler-proxy:4.1.100.Final
277+
io.netty:netty-resolver:4.1.100.Final
278+
io.netty:netty-resolver-dns:4.1.100.Final
279+
io.netty:netty-transport:4.1.100.Final
280+
io.netty:netty-transport-rxtx:4.1.100.Final
281+
io.netty:netty-transport-sctp:4.1.100.Final
282+
io.netty:netty-transport-udt:4.1.100.Final
283+
io.netty:netty-transport-classes-epoll:4.1.100.Final
284+
io.netty:netty-transport-native-unix-common:4.1.100.Final
285+
io.netty:netty-transport-classes-kqueue:4.1.100.Final
286+
io.netty:netty-resolver-dns-classes-macos:4.1.100.Final
287+
io.netty:netty-transport-native-epoll:4.1.100.Final
288+
io.netty:netty-transport-native-kqueue:4.1.100.Final
289+
io.netty:netty-resolver-dns-native-macos:4.1.100.Final
290290
io.opencensus:opencensus-api:0.12.3
291291
io.opencensus:opencensus-contrib-grpc-metrics:0.12.3
292292
io.reactivex:rxjava:1.3.8

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -244,6 +244,13 @@ public final class StoreStatisticNames {
244244
public static final String OBJECT_MULTIPART_UPLOAD_ABORTED =
245245
"object_multipart_aborted";
246246

247+
/**
248+
* Object multipart list request.
249+
* Value :{@value}.
250+
*/
251+
public static final String OBJECT_MULTIPART_UPLOAD_LIST =
252+
"object_multipart_list";
253+
247254
/**
248255
* Object put/multipart upload count.
249256
* Value :{@value}.

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,7 @@ boolean doWaitForRestart() {
476476
private DataOutputStream blockStream;
477477
private DataInputStream blockReplyStream;
478478
private ResponseProcessor response = null;
479+
private final Object nodesLock = new Object();
479480
private volatile DatanodeInfo[] nodes = null; // list of targets for current block
480481
private volatile StorageType[] storageTypes = null;
481482
private volatile String[] storageIDs = null;
@@ -619,7 +620,9 @@ private void setPipeline(LocatedBlock lb) {
619620

620621
private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
621622
String[] storageIDs) {
622-
this.nodes = nodes;
623+
synchronized (nodesLock) {
624+
this.nodes = nodes;
625+
}
623626
this.storageTypes = storageTypes;
624627
this.storageIDs = storageIDs;
625628
}
@@ -916,7 +919,10 @@ void waitForAckedSeqno(long seqno) throws IOException {
916919
try (TraceScope ignored = dfsClient.getTracer().
917920
newScope("waitForAckedSeqno")) {
918921
LOG.debug("{} waiting for ack for: {}", this, seqno);
919-
int dnodes = nodes != null ? nodes.length : 3;
922+
int dnodes;
923+
synchronized (nodesLock) {
924+
dnodes = nodes != null ? nodes.length : 3;
925+
}
920926
int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes);
921927
long begin = Time.monotonicNow();
922928
try {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
192192
"dfs.namenode.path.based.cache.block.map.allocation.percent";
193193
public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
194194

195+
public static final String DFS_NAMENODE_CRM_CHECKLOCKTIME_ENABLE =
196+
"dfs.namenode.crm.checklocktime.enable";
197+
public static final boolean DFS_NAMENODE_CRM_CHECKLOCKTIME_DEFAULT = false;
198+
199+
public static final String DFS_NAMENODE_CRM_MAXLOCKTIME_MS =
200+
"dfs.namenode.crm.maxlocktime.ms";
201+
public static final long DFS_NAMENODE_CRM_MAXLOCKTIME_MS_DEFAULT = 1000;
202+
203+
public static final String DFS_NAMENODE_CRM_SLEEP_TIME_MS =
204+
"dfs.namenode.crm.sleeptime.ms";
205+
public static final long DFS_NAMENODE_CRM_SLEEP_TIME_MS_DEFAULT = 300;
206+
195207
public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT =
196208
HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
197209
public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY =

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
140140
*/
141141
private long scannedBlocks;
142142

143+
/**
144+
* Avoid holding the global lock for too long.
145+
*/
146+
private long lastScanTimeMs;
147+
143148
public CacheReplicationMonitor(FSNamesystem namesystem,
144149
CacheManager cacheManager, long intervalMs, ReentrantLock lock) {
145150
this.namesystem = namesystem;
@@ -284,6 +289,7 @@ public void close() throws IOException {
284289
private void rescan() throws InterruptedException {
285290
scannedDirectives = 0;
286291
scannedBlocks = 0;
292+
lastScanTimeMs = Time.monotonicNow();
287293
try {
288294
namesystem.writeLock();
289295
try {
@@ -315,6 +321,19 @@ private void resetStatistics() {
315321
}
316322
}
317323

324+
private void reacquireLock(long last) {
325+
long now = Time.monotonicNow();
326+
if (now - last > cacheManager.getMaxLockTimeMs()) {
327+
try {
328+
namesystem.writeUnlock();
329+
Thread.sleep(cacheManager.getSleepTimeMs());
330+
} catch (InterruptedException e) {
331+
} finally {
332+
namesystem.writeLock();
333+
}
334+
}
335+
}
336+
318337
/**
319338
* Scan all CacheDirectives. Use the information to figure out
320339
* what cache replication factor each block should have.
@@ -447,6 +466,10 @@ private void rescanFile(CacheDirective directive, INodeFile file) {
447466
if (cachedTotal == neededTotal) {
448467
directive.addFilesCached(1);
449468
}
469+
if (cacheManager.isCheckLockTimeEnable()) {
470+
reacquireLock(lastScanTimeMs);
471+
lastScanTimeMs = Time.monotonicNow();
472+
}
450473
LOG.debug("Directive {}: caching {}: {}/{} bytes", directive.getId(),
451474
file.getFullPathName(), cachedTotal, neededTotal);
452475
}
@@ -518,6 +541,10 @@ private void rescanCachedBlockMap() {
518541
}
519542
}
520543
}
544+
if (cacheManager.isCheckLockTimeEnable()) {
545+
reacquireLock(lastScanTimeMs);
546+
lastScanTimeMs = Time.monotonicNow();
547+
}
521548
for (Iterator<CachedBlock> cbIter = cachedBlocks.iterator();
522549
cbIter.hasNext(); ) {
523550
scannedBlocks++;
@@ -603,6 +630,10 @@ private void rescanCachedBlockMap() {
603630
);
604631
cbIter.remove();
605632
}
633+
if (cacheManager.isCheckLockTimeEnable()) {
634+
reacquireLock(lastScanTimeMs);
635+
lastScanTimeMs = Time.monotonicNow();
636+
}
606637
}
607638
}
608639

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,12 @@
1717
*/
1818
package org.apache.hadoop.hdfs.server.namenode;
1919

20+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_CHECKLOCKTIME_DEFAULT;
21+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_CHECKLOCKTIME_ENABLE;
22+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_MAXLOCKTIME_MS;
23+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_MAXLOCKTIME_MS_DEFAULT;
24+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_SLEEP_TIME_MS;
25+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CRM_SLEEP_TIME_MS_DEFAULT;
2026
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT;
2127
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT;
2228
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
@@ -194,6 +200,9 @@ public class CacheManager {
194200
* The CacheReplicationMonitor.
195201
*/
196202
private CacheReplicationMonitor monitor;
203+
private boolean isCheckLockTimeEnable;
204+
private long maxLockTimeMs;
205+
private long sleepTimeMs;
197206

198207
public static final class PersistState {
199208
public final CacheManagerSection section;
@@ -235,12 +244,31 @@ public PersistState(CacheManagerSection section,
235244
this.cachedBlocks = enabled ? new LightWeightGSet<CachedBlock, CachedBlock>(
236245
LightWeightGSet.computeCapacity(cachedBlocksPercent,
237246
"cachedBlocks")) : new LightWeightGSet<>(0);
247+
this.isCheckLockTimeEnable = conf.getBoolean(
248+
DFS_NAMENODE_CRM_CHECKLOCKTIME_ENABLE,
249+
DFS_NAMENODE_CRM_CHECKLOCKTIME_DEFAULT);
250+
this.maxLockTimeMs = conf.getLong(DFS_NAMENODE_CRM_MAXLOCKTIME_MS,
251+
DFS_NAMENODE_CRM_MAXLOCKTIME_MS_DEFAULT);
252+
this.sleepTimeMs = conf.getLong(DFS_NAMENODE_CRM_SLEEP_TIME_MS,
253+
DFS_NAMENODE_CRM_SLEEP_TIME_MS_DEFAULT);
238254
}
239255

240256
public boolean isEnabled() {
241257
return enabled;
242258
}
243259

260+
public boolean isCheckLockTimeEnable() {
261+
return isCheckLockTimeEnable;
262+
}
263+
264+
public long getMaxLockTimeMs() {
265+
return this.maxLockTimeMs;
266+
}
267+
268+
public long getSleepTimeMs() {
269+
return this.sleepTimeMs;
270+
}
271+
244272
/**
245273
* Resets all tracked directives and pools. Called during 2NN checkpointing to
246274
* reset FSNamesystem state. See {@link FSNamesystem#clear()}.

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2940,6 +2940,33 @@
29402940
</description>
29412941
</property>
29422942

2943+
<property>
2944+
<name>dfs.namenode.crm.checklocktime.enable</name>
2945+
<value>false</value>
2946+
<description>
2947+
Set to true to enable CacheManager to check the amount of time it holds the
2948+
global rwlock.
2949+
</description>
2950+
</property>
2951+
2952+
<property>
2953+
<name>dfs.namenode.crm.maxlocktime.ms</name>
2954+
<value>1000</value>
2955+
<description>
2956+
The maximum amount of time that CacheManager should hold the global rwlock.
2957+
This configuration takes effect only when `dfs.namenode.crm.checklocktime.enable` is set to true.
2958+
</description>
2959+
</property>
2960+
2961+
<property>
2962+
<name>dfs.namenode.crm.sleeptime.ms</name>
2963+
<value>300</value>
2964+
<description>
2965+
The amount of time for which CacheManager should release the global rwlock.
2966+
This configuration takes effect only when `dfs.namenode.crm.checklocktime.enable` is set to true.
2967+
</description>
2968+
</property>
2969+
29432970
<property>
29442971
<name>dfs.datanode.max.locked.memory</name>
29452972
<value>0</value>

hadoop-project/pom.xml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@
143143
<jna.version>5.2.0</jna.version>
144144
<gson.version>2.9.0</gson.version>
145145
<metrics.version>3.2.4</metrics.version>
146-
<netty4.version>4.1.94.Final</netty4.version>
146+
<netty4.version>4.1.100.Final</netty4.version>
147147
<snappy-java.version>1.1.10.4</snappy-java.version>
148148
<lz4-java.version>1.7.1</lz4-java.version>
149149

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1318,4 +1318,19 @@ private Constants() {
13181318
* The bucket region header.
13191319
*/
13201320
public static final String BUCKET_REGION_HEADER = "x-amz-bucket-region";
1321+
1322+
/**
1323+
* Should directory operations purge uploads?
1324+
* This adds at least one parallelized list operation to the call,
1325+
* plus the overhead of deletions.
1326+
* Value: {@value}.
1327+
*/
1328+
public static final String DIRECTORY_OPERATIONS_PURGE_UPLOADS =
1329+
"fs.s3a.directory.operations.purge.uploads";
1330+
1331+
/**
1332+
* Default value of {@link #DIRECTORY_OPERATIONS_PURGE_UPLOADS}: {@value}.
1333+
*/
1334+
public static final boolean DIRECTORY_OPERATIONS_PURGE_UPLOADS_DEFAULT = false;
1335+
13211336
}

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/MultipartUtils.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
import org.apache.hadoop.fs.s3a.impl.StoreContext;
3737
import org.apache.hadoop.fs.store.audit.AuditSpan;
3838

39-
import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_LIST;
39+
import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_MULTIPART_UPLOAD_LIST;
4040
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfOperation;
4141

4242

@@ -66,7 +66,7 @@ private MultipartUtils() { }
6666
* @param maxKeys maximum batch size to request at a time from S3.
6767
* @return an iterator of matching uploads
6868
*/
69-
static MultipartUtils.UploadIterator listMultipartUploads(
69+
static RemoteIterator<MultipartUpload> listMultipartUploads(
7070
final StoreContext storeContext,
7171
S3Client s3,
7272
@Nullable String prefix,
@@ -196,7 +196,7 @@ private void requestNextBatch() throws IOException {
196196

197197
listing = invoker.retry("listMultipartUploads", prefix, true,
198198
trackDurationOfOperation(storeContext.getInstrumentation(),
199-
MULTIPART_UPLOAD_LIST.getSymbol(),
199+
OBJECT_MULTIPART_UPLOAD_LIST.getSymbol(),
200200
() -> s3.listMultipartUploads(requestBuilder.build())));
201201
LOG.debug("Listing found {} upload(s)",
202202
listing.uploads().size());

0 commit comments

Comments
 (0)