diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 0e65e60d33815..9526ac8fd07e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -128,6 +128,31 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-databind</artifactId>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.platform</groupId>
+      <artifactId>junit-platform-launcher</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.vintage</groupId>
+      <artifactId>junit-vintage-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
index 53cd557541a1f..0c9d589e2b791 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
@@ -19,9 +19,8 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import java.io.File;
import java.io.IOException;
@@ -35,15 +34,13 @@
/**
* Test of the URL stream handler factory.
*/
+@Timeout(30)
public class TestUrlStreamHandlerFactory {
private static final int RUNS = 20;
private static final int THREADS = 10;
private static final int TASKS = 200;
- @Rule
- public Timeout globalTimeout = new Timeout(30000);
-
@Test
public void testConcurrency() throws Exception {
for (int i = 0; i < RUNS; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
index af602477d1cbd..28400bfb0ec2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.fs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Tests for XAttr objects.
@@ -31,7 +31,7 @@
public class TestXAttr {
private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws Exception {
byte[] value = {0x31, 0x32, 0x33};
XATTR = new XAttr.Builder()
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
index 5ccee3e86a78e..c8b678526a6bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java
@@ -23,13 +23,10 @@
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -44,15 +41,16 @@
import java.util.concurrent.atomic.AtomicReference;
import static org.apache.hadoop.util.concurrent.HadoopExecutors.newFixedThreadPool;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This tests basic operations of {@link DFSOpsCountStatistics} class.
*/
+@Timeout(10)
public class TestDFSOpsCountStatistics {
private static final Logger LOG = LoggerFactory.getLogger(
@@ -63,12 +61,7 @@ public class TestDFSOpsCountStatistics {
new DFSOpsCountStatistics();
private final Map<OpType, AtomicLong> expectedOpsCountMap = new HashMap<>();
- @Rule
- public final Timeout globalTimeout = new Timeout(10 * 1000);
- @Rule
- public final ExpectedException exception = ExpectedException.none();
-
- @Before
+ @BeforeEach
public void setup() {
for (OpType opType : OpType.values()) {
expectedOpsCountMap.put(opType, new AtomicLong());
@@ -178,7 +171,7 @@ public void run() {
startBlocker.countDown(); // all threads start making directories
allDone.await(); // wait until all threads are done
- assertNull("Child failed with exception.", childError.get());
+ assertNull(childError.get(), "Child failed with exception.");
verifyStatistics();
} finally {
threadPool.shutdownNow();
@@ -207,9 +200,9 @@ private void verifyStatistics() {
for (OpType opType : OpType.values()) {
assertNotNull(expectedOpsCountMap.get(opType));
assertNotNull(statistics.getLong(opType.getSymbol()));
- assertEquals("Not expected count for operation " + opType.getSymbol(),
- expectedOpsCountMap.get(opType).longValue(),
- statistics.getLong(opType.getSymbol()).longValue());
+ assertEquals(expectedOpsCountMap.get(opType).longValue(),
+ statistics.getLong(opType.getSymbol()).longValue(),
+ "Not expected count for operation " + opType.getSymbol());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
index 8bf60971b3d7b..e2c5cfae128f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
@@ -20,8 +20,9 @@
import java.util.Random;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.io.DataOutputBuffer;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.fail;
public class TestDFSPacket {
private static final int chunkSize = 512;
@@ -59,7 +60,7 @@ public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2,
int off2, int len) {
for (int i = 0; i < len; i++) {
if (buf1[off1 + i] != buf2[off2 + i]) {
- Assert.fail("arrays differ at byte " + i + ". " +
+ fail("arrays differ at byte " + i + ". " +
"The first array has " + (int) buf1[off1 + i] +
", but the second array has " + (int) buf2[off2 + i]);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index d097eaf70c05b..adc1d9125a05e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -20,12 +20,12 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.net.InetSocketAddress;
import java.net.URI;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/** Test NameNode port defaulting code. */
public class TestDefaultNameNodePort {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
index 3f1ff8826957e..99b4c0bf68821 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.net.unix.DomainSocket;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -33,9 +33,9 @@
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestPeerCache {
static final Logger LOG = LoggerFactory.getLogger(TestPeerCache.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
index f1a11edeefcd1..9572e5117b53d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
@@ -23,9 +23,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -37,7 +36,12 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
-import static org.junit.Assert.assertSame;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
public class TestLeaseRenewer {
private final String FAKE_AUTHORITY="hdfs://nn1/";
@@ -54,7 +58,7 @@ public class TestLeaseRenewer {
/** Cause renewals often so test runs quickly. */
private static final long FAST_GRACE_PERIOD = 100L;
- @Before
+ @BeforeEach
public void setupMocksAndRenewer() throws IOException {
MOCK_DFSCLIENT = createMockClient();
@@ -82,19 +86,19 @@ public void testInstanceSharing() throws IOException {
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
LeaseRenewer lr2 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
- Assert.assertSame(lr, lr2);
+ assertSame(lr, lr2);
// But a different UGI should return a different instance
LeaseRenewer lr3 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_B, MOCK_DFSCLIENT);
- Assert.assertNotSame(lr, lr3);
+ assertNotSame(lr, lr3);
// A different authority with same UGI should also be a different
// instance.
LeaseRenewer lr4 = LeaseRenewer.getInstance(
"someOtherAuthority", FAKE_UGI_B, MOCK_DFSCLIENT);
- Assert.assertNotSame(lr, lr4);
- Assert.assertNotSame(lr3, lr4);
+ assertNotSame(lr, lr4);
+ assertNotSame(lr3, lr4);
}
@Test
@@ -122,7 +126,7 @@ public Boolean answer(InvocationOnMock invocation) throws Throwable {
Thread.sleep(50);
}
if (leaseRenewalCount.get() == 0) {
- Assert.fail("Did not renew lease at all!");
+ fail("Did not renew lease at all!");
}
renewer.closeClient(MOCK_DFSCLIENT);
@@ -176,23 +180,21 @@ public Boolean get() {
// Make sure renewer is not running due to expiration.
Thread.sleep(FAST_GRACE_PERIOD * 2);
- Assert.assertTrue(!renewer.isRunning());
+ assertTrue(!renewer.isRunning());
}
@Test
public void testThreadName() throws Exception {
- Assert.assertFalse("Renewer not initially running",
- renewer.isRunning());
+ assertFalse(renewer.isRunning(), "Renewer not initially running");
// Pretend to open a file
renewer.put(MOCK_DFSCLIENT);
- Assert.assertTrue("Renewer should have started running",
- renewer.isRunning());
+ assertTrue(renewer.isRunning(), "Renewer should have started running");
// Check the thread name is reasonable
String threadName = renewer.getDaemonName();
- Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
+ assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
// Pretend to close the file
renewer.closeClient(MOCK_DFSCLIENT);
@@ -203,7 +205,7 @@ public void testThreadName() throws Exception {
while (renewer.isRunning() && Time.monotonicNow() < failTime) {
Thread.sleep(50);
}
- Assert.assertFalse(renewer.isRunning());
+ assertFalse(renewer.isRunning());
}
/**
@@ -213,24 +215,23 @@ public void testThreadName() throws Exception {
*/
@Test
public void testDaemonThreadLeak() throws Exception {
- Assert.assertFalse("Renewer not initially running", renewer.isRunning());
+ assertFalse(renewer.isRunning(), "Renewer not initially running");
// Pretend to create a file#1, daemon#1 starts
renewer.put(MOCK_DFSCLIENT);
- Assert.assertTrue("Renewer should have started running",
- renewer.isRunning());
+ assertTrue(renewer.isRunning(), "Renewer should have started running");
Pattern daemonThreadNamePattern = Pattern.compile("LeaseRenewer:\\S+");
- Assert.assertEquals(1, countThreadMatching(daemonThreadNamePattern));
+ assertEquals(1, countThreadMatching(daemonThreadNamePattern));
// Pretend to create file#2, daemon#2 starts due to expiration
LeaseRenewer lastRenewer = renewer;
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
- Assert.assertEquals(lastRenewer, renewer);
+ assertEquals(lastRenewer, renewer);
// Pretend to close file#1
renewer.closeClient(MOCK_DFSCLIENT);
- Assert.assertEquals(1, countThreadMatching(daemonThreadNamePattern));
+ assertEquals(1, countThreadMatching(daemonThreadNamePattern));
// Pretend to be expired
renewer.setEmptyTime(0);
@@ -249,7 +250,7 @@ public void testDaemonThreadLeak() throws Exception {
int threadCount = countThreadMatching(daemonThreadNamePattern);
//Sometimes old LR#Daemon gets closed and lead to count 1 (rare scenario)
- Assert.assertTrue(1 == threadCount || 2 == threadCount);
+ assertTrue(1 == threadCount || 2 == threadCount);
// After grace period, both daemon#1 and renewer#1 will be removed due to
// expiration, then daemon#2 will leak before HDFS-14575.
@@ -259,14 +260,14 @@ public void testDaemonThreadLeak() throws Exception {
lastRenewer = renewer;
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
- Assert.assertEquals(lastRenewer, renewer);
+ assertEquals(lastRenewer, renewer);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
renewer.closeClient(MOCK_DFSCLIENT);
renewer.setEmptyTime(0);
// Make sure LeaseRenewer#daemon threads will terminate after grace period
Thread.sleep(FAST_GRACE_PERIOD * 2);
- Assert.assertEquals("LeaseRenewer#daemon thread leaks", 0,
- countThreadMatching(daemonThreadNamePattern));
+ assertEquals(0, countThreadMatching(daemonThreadNamePattern),
+ "LeaseRenewer#daemon thread leaks");
}
private static int countThreadMatching(Pattern pattern) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockType.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockType.java
index 98f586c42d3b9..f96816d70967c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockType.java
@@ -17,11 +17,11 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Test the BlockType class.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
index f1674af8b57f1..3c4ab7eb8d97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
@@ -19,11 +19,11 @@
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Test ErasureCodingPolicy.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicyInfo.java
index 417ea3027ee3a..cad696f8a58c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicyInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicyInfo.java
@@ -17,15 +17,15 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies.RS_6_3_POLICY_ID;
import static org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState.DISABLED;
import static org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState.ENABLED;
import static org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState.REMOVED;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Test {@link ErasureCodingPolicyInfo}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
index 10c1671b62725..5d43517c70731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestExtendedBlock {
@@ -71,7 +71,6 @@ public void testHashcode() {
}
private static void assertNotEquals(Object a, Object b) {
- assertFalse("expected not equal: '" + a + "' and '" + b + "'",
- a.equals(b));
+ assertFalse(a.equals(b), "expected not equal: '" + a + "' and '" + b + "'");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
index 683a1baae0cfc..2008d9cd95490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
@@ -29,9 +29,9 @@
import org.apache.hadoop.fs.FileStatus;
-import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Unit test verifying that {@link HdfsFileStatus} is a superset of
@@ -51,10 +51,9 @@ public void testInterfaceSuperset() {
hfsM.addAll(signatures(Object.class));
assertTrue(fsM.removeAll(hfsM));
// verify that FileStatus is a subset of HdfsFileStatus
- assertEquals(fsM.stream()
- .map(MethodSignature::toString)
- .collect(joining("\n")),
- Collections.emptySet(), fsM);
+ assertEquals(Collections.emptySet(), fsM, fsM.stream()
+ .map(MethodSignature::toString)
+ .collect(joining("\n")));
}
/** Map non-static, declared methods for this class to signatures. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
index c225a98c8244a..0c073549c3a37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
@@ -18,14 +18,14 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Testing class for {@link ReadOnly} annotation on {@link ClientProtocol}.
@@ -96,9 +96,8 @@ private void checkIsReadOnly(String methodName, boolean expected) {
// with the same name. The assumption is that all these methods should
// share the same annotation.
if (m.getName().equals(methodName)) {
- assertEquals("Expected ReadOnly for method '" + methodName +
- "' to be " + expected,
- m.isAnnotationPresent(ReadOnly.class), expected);
+ assertEquals(m.isAnnotationPresent(ReadOnly.class), expected,
+ "Expected ReadOnly for method '" + methodName + "' to be " + expected);
return;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
index a04e779e8004d..cceb01b31cd16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -25,11 +25,9 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.event.Level;
@@ -43,10 +41,11 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -85,15 +84,12 @@ public class TestConfiguredFailoverProxyProvider {
private String ns4nn1Hostname = "localhost";
private String ns4nn2Hostname = "127.0.0.1";
- @Rule
- public final ExpectedException exception = ExpectedException.none();
-
- @BeforeClass
+ @BeforeAll
public static void setupClass() throws Exception {
GenericTestUtils.setLogLevel(RequestHedgingProxyProvider.LOG, Level.TRACE);
}
- @Before
+ @BeforeEach
public void setup() throws URISyntaxException {
ns1 = "mycluster-1-" + Time.monotonicNow();
ns1Uri = new URI("hdfs://" + ns1);
@@ -329,32 +325,23 @@ private void testResolveDomainNameUsingDNS(boolean useFQDN) throws Exception {
assertEquals(2, proxyResults.size());
if (Shell.isJavaVersionAtLeast(14) && useFQDN) {
// JDK-8225499. The string format of unresolved address has been changed.
- assertTrue(
- "nn1 wasn't returned: " + proxyResults,
- proxyResults.containsKey(resolvedHost1 + "/:8020"));
- assertTrue(
- "nn2 wasn't returned: " + proxyResults,
- proxyResults.containsKey(resolvedHost2 + "/:8020"));
+ assertTrue(proxyResults.containsKey(resolvedHost1 + "/:8020"),
+ "nn1 wasn't returned: " + proxyResults);
+ assertTrue(proxyResults.containsKey(resolvedHost2 + "/:8020"),
+ "nn2 wasn't returned: " + proxyResults);
} else {
- assertTrue(
- "nn1 wasn't returned: " + proxyResults,
- proxyResults.containsKey(resolvedHost1 + ":8020"));
- assertTrue(
- "nn2 wasn't returned: " + proxyResults,
- proxyResults.containsKey(resolvedHost2 + ":8020"));
+ assertTrue(proxyResults.containsKey(resolvedHost1 + ":8020"),
+ "nn1 wasn't returned: " + proxyResults);
+ assertTrue(proxyResults.containsKey(resolvedHost2 + ":8020"),
+ "nn2 wasn't returned: " + proxyResults);
}
// Check that the Namenodes were invoked
assertEquals(NUM_ITERATIONS, nn1Count.get() + nn2Count.get());
- assertTrue("nn1 was selected too much:" + nn1Count.get(),
- nn1Count.get() < NUM_ITERATIONS);
- assertTrue("nn1 should have been selected: " + nn1Count.get(),
- nn1Count.get() > 0);
- assertTrue("nn2 was selected too much:" + nn2Count.get(),
- nn2Count.get() < NUM_ITERATIONS);
- assertTrue(
- "nn2 should have been selected: " + nn2Count.get(),
- nn2Count.get() > 0);
+ assertTrue(nn1Count.get() < NUM_ITERATIONS, "nn1 was selected too much:" + nn1Count.get());
+ assertTrue(nn1Count.get() > 0, "nn1 should have been selected: " + nn1Count.get());
+ assertTrue(nn2Count.get() < NUM_ITERATIONS, "nn2 was selected too much:" + nn2Count.get());
+ assertTrue(nn2Count.get() > 0, "nn2 should have been selected: " + nn2Count.get());
}
@Test
@@ -416,13 +403,13 @@ public void testResolveDomainNameUsingDNSUnknownHost() throws Exception {
addDNSSettings(dnsConf, false, false);
Map<InetSocketAddress, ClientProtocol> proxyMap = new HashMap<>();
- exception.expect(RuntimeException.class);
- ConfiguredFailoverProxyProvider<ClientProtocol> provider =
- new ConfiguredFailoverProxyProvider<>(
- dnsConf, ns3Uri, ClientProtocol.class, createFactory(proxyMap));
+ assertThrows(RuntimeException.class, () -> {
+ ConfiguredFailoverProxyProvider<ClientProtocol> provider =
+ new ConfiguredFailoverProxyProvider<>(
+ dnsConf, ns3Uri, ClientProtocol.class, createFactory(proxyMap));
- assertNull("failover proxy cannot be created due to unknownhost",
- provider);
+ assertNull(provider, "failover proxy cannot be created due to unknownhost");
+ });
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 5e6cdf5eedc73..8c913377c01fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,17 +42,18 @@
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;
@@ -66,12 +67,12 @@ public class TestRequestHedgingProxyProvider {
private URI nnUri;
private String ns;
- @BeforeClass
+ @BeforeAll
public static void setupClass() throws Exception {
GenericTestUtils.setLogLevel(RequestHedgingProxyProvider.LOG, Level.TRACE);
}
- @Before
+ @BeforeEach
public void setup() throws URISyntaxException {
ns = "mycluster-" + Time.monotonicNow();
nnUri = new URI("hdfs://" + ns);
@@ -103,10 +104,10 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
RequestHedgingProxyProvider<ClientProtocol> provider =
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(badMock, goodMock));
- Assert.assertTrue(Proxy.getInvocationHandler(
+ assertTrue(Proxy.getInvocationHandler(
provider.getProxy().proxy) instanceof RpcInvocationHandler);
long[] stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
+ assertTrue(stats.length == 1);
Mockito.verify(badMock).getStats();
Mockito.verify(goodMock).getStats();
}
@@ -202,8 +203,8 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(goodMock, badMock));
long[] stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
Mockito.verify(badMock).getStats();
Mockito.verify(goodMock).getStats();
}
@@ -221,9 +222,9 @@ public void testHedgingWhenBothFail() throws Exception {
createFactory(badMock, worseMock));
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since both namenodes throw IOException !!");
+ fail("Should fail since both namenodes throw IOException !!");
} catch (Exception e) {
- Assert.assertTrue(e instanceof MultiException);
+ assertTrue(e instanceof MultiException);
}
Mockito.verify(badMock).getStats();
Mockito.verify(worseMock).getStats();
@@ -261,61 +262,61 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(goodMock, badMock));
long[] stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
- Assert.assertEquals(2, counter.get());
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
+ assertEquals(2, counter.get());
Mockito.verify(badMock).getStats();
Mockito.verify(goodMock).getStats();
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
// Ensure only the previous successful one is invoked
Mockito.verifyNoMoreInteractions(badMock);
- Assert.assertEquals(3, counter.get());
+ assertEquals(3, counter.get());
// Flip to standby.. so now this should fail
isGood[0] = 2;
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since previously successful proxy now fails ");
+ fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
- Assert.assertTrue(ex instanceof IOException);
+ assertTrue(ex instanceof IOException);
}
- Assert.assertEquals(4, counter.get());
+ assertEquals(4, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(2, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(2, stats[0]);
// Counter should update only once
- Assert.assertEquals(5, counter.get());
+ assertEquals(5, counter.get());
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(2, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(2, stats[0]);
// Counter updates only once now
- Assert.assertEquals(6, counter.get());
+ assertEquals(6, counter.get());
// Flip back to old active.. so now this should fail
isGood[0] = 1;
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since previously successful proxy now fails ");
+ fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
- Assert.assertTrue(ex instanceof IOException);
+ assertTrue(ex instanceof IOException);
}
- Assert.assertEquals(7, counter.get());
+ assertEquals(7, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
+ assertTrue(stats.length == 1);
// Ensure correct proxy was called
- Assert.assertEquals(1, stats[0]);
+ assertEquals(1, stats[0]);
}
@Test
@@ -338,7 +339,7 @@ public void testFileNotFoundExceptionWithSingleProxy() throws Exception {
ClientProtocol.class, createFactory(standby, active));
try {
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " FileNotFoundException!");
} catch (MultiException me) {
for (Exception ex : me.getExceptions().values()) {
@@ -346,14 +347,14 @@ public void testFileNotFoundExceptionWithSingleProxy() throws Exception {
if (rEx instanceof StandbyException) {
continue;
}
- Assert.assertTrue(rEx instanceof FileNotFoundException);
+ assertTrue(rEx instanceof FileNotFoundException);
}
}
//Perform failover now, there will only be one active proxy now
provider.performFailover(active);
try {
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " FileNotFoundException!");
} catch (RemoteException ex) {
Exception rEx = ex.unwrapRemoteException();
@@ -363,7 +364,7 @@ public void testFileNotFoundExceptionWithSingleProxy() throws Exception {
Mockito.verify(standby, Mockito.times(2))
.getBlockLocations(anyString(), anyLong(), anyLong());
} else {
- Assert.assertTrue(rEx instanceof FileNotFoundException);
+ assertTrue(rEx instanceof FileNotFoundException);
Mockito.verify(active, Mockito.times(2))
.getBlockLocations(anyString(), anyLong(), anyLong());
Mockito.verify(standby).getBlockLocations(anyString(), anyLong(),
@@ -395,22 +396,22 @@ public void testSingleProxyFailover() throws Exception {
ClientProtocol.class, createFactory(active));
try {
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " FileNotFoundException!");
} catch (RemoteException ex) {
Exception rEx = ex.unwrapRemoteException();
- Assert.assertTrue(rEx instanceof FileNotFoundException);
+ assertTrue(rEx instanceof FileNotFoundException);
}
//Perform failover now, there will be no active proxies now
provider.performFailover(active);
try {
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " FileNotFoundException!");
} catch (RemoteException ex) {
Exception rEx = ex.unwrapRemoteException();
- Assert.assertTrue(rEx instanceof IOException);
- Assert.assertTrue(rEx.getMessage().equals("No valid proxies left."
+ assertTrue(rEx instanceof IOException);
+ assertTrue(rEx.getMessage().equals("No valid proxies left."
+ " All NameNode proxies have failed over."));
}
}
@@ -465,75 +466,75 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(goodMock, badMock, worseMock));
long[] stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
- Assert.assertEquals(3, counter.get());
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
+ assertEquals(3, counter.get());
Mockito.verify(badMock).getStats();
Mockito.verify(goodMock).getStats();
Mockito.verify(worseMock).getStats();
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
// Ensure only the previous successful one is invoked
Mockito.verifyNoMoreInteractions(badMock);
Mockito.verifyNoMoreInteractions(worseMock);
- Assert.assertEquals(4, counter.get());
+ assertEquals(4, counter.get());
// Flip to standby.. so now this should fail
isGood[0] = 2;
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since previously successful proxy now fails ");
+ fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
- Assert.assertTrue(ex instanceof IOException);
+ assertTrue(ex instanceof IOException);
}
- Assert.assertEquals(5, counter.get());
+ assertEquals(5, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(2, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(2, stats[0]);
// Counter updates twice since both proxies are tried on failure
- Assert.assertEquals(7, counter.get());
+ assertEquals(7, counter.get());
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(2, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(2, stats[0]);
// Counter updates only once now
- Assert.assertEquals(8, counter.get());
+ assertEquals(8, counter.get());
// Flip to Other standby.. so now this should fail
isGood[0] = 3;
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since previously successful proxy now fails ");
+ fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
- Assert.assertTrue(ex instanceof IOException);
+ assertTrue(ex instanceof IOException);
}
// Counter should ipdate only 1 time
- Assert.assertEquals(9, counter.get());
+ assertEquals(9, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
+ assertTrue(stats.length == 1);
// Ensure correct proxy was called
- Assert.assertEquals(3, stats[0]);
+ assertEquals(3, stats[0]);
// Counter updates twice since both proxies are tried on failure
- Assert.assertEquals(11, counter.get());
+ assertEquals(11, counter.get());
stats = provider.getProxy().proxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(3, stats[0]);
+ assertTrue(stats.length == 1);
+ assertEquals(3, stats[0]);
// Counter updates only once now
- Assert.assertEquals(12, counter.get());
+ assertEquals(12, counter.get());
}
@Test
@@ -558,7 +559,7 @@ public void testHedgingWhenFileNotFoundException() throws Exception {
ClientProtocol.class, createFactory(active, standby));
try {
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " FileNotFoundException!");
} catch (MultiException me) {
for (Exception ex : me.getExceptions().values()) {
@@ -566,7 +567,7 @@ public void testHedgingWhenFileNotFoundException() throws Exception {
if (rEx instanceof StandbyException) {
continue;
}
- Assert.assertTrue(rEx instanceof FileNotFoundException);
+ assertTrue(rEx instanceof FileNotFoundException);
}
}
Mockito.verify(active).getBlockLocations(anyString(),
@@ -591,17 +592,17 @@ public void testHedgingWhenConnectException() throws Exception {
ClientProtocol.class, createFactory(active, standby));
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since the active namenode throws"
+ fail("Should fail since the active namenode throws"
+ " ConnectException!");
} catch (MultiException me) {
for (Exception ex : me.getExceptions().values()) {
if (ex instanceof RemoteException) {
Exception rEx = ((RemoteException) ex)
.unwrapRemoteException();
- Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(),
- rEx instanceof StandbyException);
+ assertTrue(rEx instanceof StandbyException,
+ "Unexpected RemoteException: " + rEx.getMessage());
} else {
- Assert.assertTrue(ex instanceof ConnectException);
+ assertTrue(ex instanceof ConnectException);
}
}
}
@@ -622,13 +623,13 @@ public void testHedgingWhenConnectAndEOFException() throws Exception {
ClientProtocol.class, createFactory(active, standby));
try {
provider.getProxy().proxy.getStats();
- Assert.fail("Should fail since both active and standby namenodes throw"
+ fail("Should fail since both active and standby namenodes throw"
+ " Exceptions!");
} catch (MultiException me) {
for (Exception ex : me.getExceptions().values()) {
if (!(ex instanceof ConnectException) &&
!(ex instanceof EOFException)) {
- Assert.fail("Unexpected Exception " + ex.getMessage());
+ fail("Unexpected Exception " + ex.getMessage());
}
}
}
@@ -672,9 +673,9 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
createFactory(delayMock, badMock));
final ClientProtocol delayProxy = provider.getProxy().proxy;
long[] stats = delayProxy.getStats();
- Assert.assertTrue(stats.length == 1);
- Assert.assertEquals(1, stats[0]);
- Assert.assertEquals(1, counter.get());
+ assertTrue(stats.length == 1);
+ assertEquals(1, stats[0]);
+ assertEquals(1, counter.get());
Thread t = new Thread() {
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
index 0c202edac6d48..189792cc50850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
@@ -23,10 +23,9 @@
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -35,19 +34,25 @@
import java.util.ArrayList;
import java.util.Iterator;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
public class TestShortCircuitShm {
public static final Logger LOG = LoggerFactory.getLogger(
TestShortCircuitShm.class);
private static final File TEST_BASE = GenericTestUtils.getTestDir();
- @Before
+ @BeforeEach
public void before() {
- Assume.assumeTrue(null ==
+ assumeTrue(null ==
SharedFileDescriptorFactory.getLoadingFailureReason());
}
- @Test(timeout=60000)
+ @Test
+ @Timeout(value = 60)
public void testStartupShutdown() throws Exception {
File path = new File(TEST_BASE, "testStartupShutdown");
path.mkdirs();
@@ -62,7 +67,8 @@ public void testStartupShutdown() throws Exception {
FileUtil.fullyDelete(path);
}
- @Test(timeout=60000)
+ @Test
+ @Timeout(value = 60)
public void testAllocateSlots() throws Exception {
File path = new File(TEST_BASE, "testAllocateSlots");
path.mkdirs();
@@ -83,17 +89,17 @@ public void testAllocateSlots() throws Exception {
int slotIdx = 0;
for (Iterator<Slot> iter = shm.slotIterator();
iter.hasNext(); ) {
- Assert.assertTrue(slots.contains(iter.next()));
+ assertTrue(slots.contains(iter.next()));
}
for (Slot slot : slots) {
- Assert.assertFalse(slot.addAnchor());
- Assert.assertEquals(slotIdx++, slot.getSlotIdx());
+ assertFalse(slot.addAnchor());
+ assertEquals(slotIdx++, slot.getSlotIdx());
}
for (Slot slot : slots) {
slot.makeAnchorable();
}
for (Slot slot : slots) {
- Assert.assertTrue(slot.addAnchor());
+ assertTrue(slot.addAnchor());
}
for (Slot slot : slots) {
slot.removeAnchor();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index b624f18bd14ba..c12b6831d45db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -24,8 +24,7 @@
import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -44,6 +43,12 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
/**
* Test {@link ByteArrayManager}.
*/
@@ -93,15 +98,15 @@ public Integer call() throws Exception {
}
// check futures
- Assert.assertEquals(n, futures.size());
+ assertEquals(n, futures.size());
for(int i = 0; i < n; i++) {
- Assert.assertEquals(i + 1, futures.get(i).get().intValue());
+ assertEquals(i + 1, futures.get(i).get().intValue());
}
- Assert.assertEquals(n, c.getCount());
+ assertEquals(n, c.getCount());
// test auto-reset
Thread.sleep(countResetTimePeriodMs + 100);
- Assert.assertEquals(1, c.increment());
+ assertEquals(1, c.increment());
}
@@ -131,12 +136,12 @@ public void testAllocateRecycle() throws Exception {
}
waitForAll(allocator.futures);
- Assert.assertEquals(countThreshold,
+ assertEquals(countThreshold,
counters.get(arrayLength, false).getCount());
- Assert.assertNull(managers.get(arrayLength, false));
+ assertNull(managers.get(arrayLength, false));
for(int n : uncommonArrays) {
- Assert.assertNull(counters.get(n, false));
- Assert.assertNull(managers.get(n, false));
+ assertNull(counters.get(n, false));
+ assertNull(managers.get(n, false));
}
}
@@ -146,7 +151,7 @@ public void testAllocateRecycle() throws Exception {
}
for(Future<Integer> f : recycler.furtures) {
- Assert.assertEquals(-1, f.get().intValue());
+ assertEquals(-1, f.get().intValue());
}
recycler.furtures.clear();
}
@@ -154,8 +159,8 @@ public void testAllocateRecycle() throws Exception {
{ // allocate one more
allocator.submit(arrayLength).get();
- Assert.assertEquals(countThreshold + 1, counters.get(arrayLength, false).getCount());
- Assert.assertNotNull(managers.get(arrayLength, false));
+ assertEquals(countThreshold + 1, counters.get(arrayLength, false).getCount());
+ assertNotNull(managers.get(arrayLength, false));
}
{ // recycle the remaining arrays
@@ -182,25 +187,25 @@ public void testAllocateRecycle() throws Exception {
if (threadState != Thread.State.RUNNABLE
&& threadState != Thread.State.WAITING
&& threadState != Thread.State.TIMED_WAITING) {
- Assert.fail("threadState = " + threadState);
+ fail("threadState = " + threadState);
}
}
// recycle an array
recycler.submit(removeLast(allocator.futures).get());
- Assert.assertEquals(1, removeLast(recycler.furtures).get().intValue());
+ assertEquals(1, removeLast(recycler.furtures).get().intValue());
// check if the thread is unblocked
Thread.sleep(100);
- Assert.assertEquals(Thread.State.TERMINATED, t.getState());
+ assertEquals(Thread.State.TERMINATED, t.getState());
// recycle the remaining, the recycle should be full.
- Assert.assertEquals(countLimit-1, allocator.recycleAll(recycler));
+ assertEquals(countLimit-1, allocator.recycleAll(recycler));
recycler.submit(t.array);
recycler.verify(countLimit);
// recycle one more; it should not increase the free queue size
- Assert.assertEquals(countLimit, bam.release(new byte[arrayLength]));
+ assertEquals(countLimit, bam.release(new byte[arrayLength]));
}
} finally {
allocator.pool.shutdown();
@@ -255,7 +260,7 @@ Future<byte[]> submit(final int arrayLength) {
@Override
public byte[] call() throws Exception {
final byte[] array = bam.newByteArray(arrayLength);
- Assert.assertEquals(arrayLength, array.length);
+ assertEquals(arrayLength, array.length);
return array;
}
});
@@ -294,10 +299,10 @@ public Integer call() throws Exception {
}
void verify(final int expectedSize) throws Exception {
- Assert.assertEquals(expectedSize, furtures.size());
+ assertEquals(expectedSize, furtures.size());
Collections.sort(furtures, CMP);
for(int i = 0; i < furtures.size(); i++) {
- Assert.assertEquals(i+1, furtures.get(i).get().intValue());
+ assertEquals(i+1, furtures.get(i).get().intValue());
}
furtures.clear();
}
@@ -364,24 +369,24 @@ boolean shouldRun() {
randomRecycler.start();
randomRecycler.join();
- Assert.assertTrue(exceptions.isEmpty());
+ assertTrue(exceptions.isEmpty());
- Assert.assertNull(counters.get(0, false));
+ assertNull(counters.get(0, false));
for(int i = 1; i < runners.length; i++) {
if (!runners[i].assertionErrors.isEmpty()) {
for(AssertionError e : runners[i].assertionErrors) {
LOG.error("AssertionError " + i, e);
}
- Assert.fail(runners[i].assertionErrors.size() + " AssertionError(s)");
+ fail(runners[i].assertionErrors.size() + " AssertionError(s)");
}
final int arrayLength = Runner.index2arrayLength(i);
final boolean exceedCountThreshold = counters.get(arrayLength, false).getCount() > countThreshold;
final FixedLengthManager m = managers.get(arrayLength, false);
if (exceedCountThreshold) {
- Assert.assertNotNull(m);
+ assertNotNull(m);
} else {
- Assert.assertNull(m);
+ assertNull(m);
}
}
}
@@ -391,7 +396,7 @@ static void sleepMs(long ms) {
Thread.sleep(ms);
} catch (InterruptedException e) {
e.printStackTrace();
- Assert.fail("Sleep is interrupted: " + e);
+ fail("Sleep is interrupted: " + e);
}
}
@@ -443,8 +448,8 @@ public byte[] call() throws Exception {
maxArrayLength - lower) + lower + 1;
final byte[] array = bam.newByteArray(arrayLength);
try {
- Assert.assertEquals("arrayLength=" + arrayLength + ", lower=" + lower,
- maxArrayLength, array.length);
+ assertEquals(maxArrayLength, array.length, "arrayLength=" + arrayLength +
+ ", lower=" + lower);
} catch(AssertionError e) {
assertionErrors.add(e);
}
@@ -507,7 +512,7 @@ public void run() {
}
} catch (Exception e) {
e.printStackTrace();
- Assert.fail(this + " has " + e);
+ fail(this + " has " + e);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestECPolicyLoader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestECPolicyLoader.java
index 55fa56784f524..0a923879d4bef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestECPolicyLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestECPolicyLoader.java
@@ -19,15 +19,15 @@
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.util.List;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
index 010d7c5870c54..b70b12fbe08a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.hdfs.web;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.CALLS_REAL_METHODS;
@@ -37,7 +37,7 @@
import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders;
import org.apache.hadoop.hdfs.web.ByteRangeInputStream.InputStreamAndFileLength;
import org.apache.hadoop.test.Whitebox;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
public class TestByteRangeInputStream {
@@ -83,20 +83,17 @@ public void testByteRange() throws IOException {
bris.seek(0);
- assertEquals("getPos wrong", 0, bris.getPos());
+ assertEquals(0, bris.getPos(), "getPos wrong");
bris.read();
- assertEquals("Initial call made incorrectly (offset check)",
- 0, bris.startPos);
- assertEquals("getPos should return 1 after reading one byte", 1,
- bris.getPos());
+ assertEquals(0, bris.startPos, "Initial call made incorrectly (offset check)");
+ assertEquals(1, bris.getPos(), "getPos should return 1 after reading one byte");
verify(oMock, times(1)).connect(0, false);
bris.read();
- assertEquals("getPos should return 2 after reading two bytes", 2,
- bris.getPos());
+ assertEquals(2, bris.getPos(), "getPos should return 2 after reading two bytes");
// No additional connections should have been made (no seek)
verify(oMock, times(1)).connect(0, false);
@@ -105,10 +102,10 @@ public void testByteRange() throws IOException {
bris.seek(100);
bris.read();
- assertEquals("Seek to 100 bytes made incorrectly (offset Check)",
- 100, bris.startPos);
- assertEquals("getPos should return 101 after reading one byte", 101,
- bris.getPos());
+ assertEquals(100, bris.startPos,
+ "Seek to 100 bytes made incorrectly (offset Check)");
+ assertEquals(101, bris.getPos(),
+ "getPos should return 101 after reading one byte");
verify(rMock, times(1)).connect(100, true);
bris.seek(101);
@@ -121,8 +118,7 @@ public void testByteRange() throws IOException {
bris.seek(2500);
bris.read();
- assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",
- 2500, bris.startPos);
+ assertEquals(2500, bris.startPos, "Seek to 2500 bytes made incorrectly (offset Check)");
doReturn(getMockConnection(null))
.when(rMock).connect(anyLong(), anyBoolean());
@@ -131,9 +127,8 @@ public void testByteRange() throws IOException {
bris.read();
fail("Exception should be thrown when content-length is not given");
} catch (IOException e) {
- assertTrue("Incorrect response message: " + e.getMessage(),
- e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH +
- " is missing: "));
+ assertTrue(e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH + " is missing: "),
+ "Incorrect response message: " + e.getMessage());
}
bris.close();
}
@@ -203,7 +198,7 @@ public void testPropagatedClose() throws IOException {
errored = true;
assertEquals("Stream closed", e.getMessage());
} finally {
- assertTrue("Read a closed steam", errored);
+ assertTrue(errored, "Read a closed stream");
}
verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
@@ -223,35 +218,31 @@ public void testAvailable() throws IOException {
ByteRangeInputStream.StreamStatus.SEEK);
- assertEquals("Before read or seek, available should be same as filelength",
- 65535, bris.available());
+ assertEquals(65535, bris.available(),
+ "Before read or seek, available should be same as filelength");
verify(bris, times(1)).openInputStream(Mockito.anyLong());
bris.seek(10);
- assertEquals("Seek 10 bytes, available should return filelength - 10"
- , 65525,
- bris.available());
+ assertEquals(65525, bris.available(), "Seek 10 bytes, available should return filelength - 10");
//no more bytes available
bris.seek(65535);
- assertEquals("Seek till end of file, available should return 0 bytes", 0,
- bris.available());
+ assertEquals(0, bris.available(), "Seek till end of file, available should return 0 bytes");
//test reads, seek back to 0 and start reading
bris.seek(0);
bris.read();
- assertEquals("Read 1 byte, available must return filelength - 1",
- 65534, bris.available());
+ assertEquals(65534, bris.available(), "Read 1 byte, available must return filelength - 1");
bris.read();
- assertEquals("Read another 1 byte, available must return filelength - 2",
- 65533, bris.available());
+ assertEquals(65533, bris.available(),
+ "Read another 1 byte, available must return filelength - 2");
//seek and read
bris.seek(100);
bris.read();
- assertEquals("Seek to offset 100 and read 1 byte, available should return filelength - 101",
- 65434, bris.available());
+ assertEquals(65434, bris.available(),
+ "Seek to offset 100 and read 1 byte, available should return filelength - 101");
bris.close();
}
@@ -284,8 +275,7 @@ public void testAvailableStreamClosed() throws IOException {
bris.available();
fail("Exception should be thrown when stream is closed");
}catch(IOException e){
- assertTrue("Exception when stream is closed",
- e.getMessage().equals("Stream closed"));
+ assertEquals("Stream closed", e.getMessage(), "Exception when stream is closed");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
index 1a95fc8c327e0..c7a207fa598b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
@@ -17,12 +17,12 @@
*/
package org.apache.hadoop.hdfs.web;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.net.URL;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestOffsetUrlInputStream {
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
index 4662f4747a1e0..1a86cf6645d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
@@ -18,13 +18,13 @@
package org.apache.hadoop.hdfs.web;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
@@ -55,7 +55,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.Whitebox;
import org.apache.hadoop.util.Progressable;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
public class TestTokenAspect {
@@ -253,7 +253,7 @@ public void testInitWithUGIToken() throws IOException, URISyntaxException {
fs.ugi.addToken(token);
fs.ugi.addToken(new Token(new byte[0], new byte[0],
new Text("Other token"), new Text("127.0.0.1:8021")));
- assertEquals("wrong tokens in user", 2, fs.ugi.getTokens().size());
+ assertEquals(2, fs.ugi.getTokens().size(), "wrong tokens in user");
fs.emulateSecurityEnabled = true;
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
index 1fe6dcad932bc..4e79eb837cc5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
@@ -28,12 +28,15 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.LoggerFactory;
@@ -47,14 +50,14 @@ public void testConnConfiguratior() throws IOException {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
- Assert.assertEquals(u, conn.getURL());
+ assertEquals(u, conn.getURL());
conns.add(conn);
return conn;
}
});
fc.openConnection(u);
- Assert.assertEquals(1, conns.size());
+ assertEquals(1, conns.size());
}
@Test
@@ -65,9 +68,9 @@ public void testSSLInitFailure() throws Exception {
GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(URLConnectionFactory.class));
URLConnectionFactory.newDefaultURLConnectionFactory(conf);
- Assert.assertTrue("Expected log for ssl init failure not found!",
- logs.getOutput().contains(
- "Cannot load customized ssl related configuration"));
+ assertTrue(logs.getOutput()
+ .contains("Cannot load customized ssl related configuration"),
+ "Expected log for ssl init failure not found!");
}
@Test
@@ -104,7 +107,7 @@ public void testSSLFactoryCleanup() throws Exception {
reloaderThread = thread;
}
}
- Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive());
+ assertTrue(reloaderThread.isAlive(), "Reloader is not alive");
fs.close();
@@ -116,6 +119,6 @@ public void testSSLFactoryCleanup() throws Exception {
}
Thread.sleep(1000);
}
- Assert.assertFalse("Reloader is still alive", reloaderStillAlive);
+ assertFalse(reloaderStillAlive, "Reloader is still alive");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
index af17d0b1e5ee3..2e7abba20ec6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
@@ -28,9 +28,9 @@
import org.apache.hadoop.hdfs.web.oauth2.CredentialBasedAccessTokenProvider;
import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
import org.apache.http.HttpStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockserver.client.MockServerClient;
import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.Header;
@@ -51,7 +51,7 @@
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockserver.integration.ClientAndServer.startClientAndServer;
import static org.mockserver.matchers.Times.exactly;
import static org.mockserver.model.HttpRequest.request;
@@ -72,11 +72,11 @@ public class TestWebHDFSOAuth2 {
public final static String AUTH_TOKEN = "0123456789abcdef";
public final static Header AUTH_TOKEN_HEADER = new Header("AUTHORIZATION", OAuth2ConnectionConfigurator.HEADER + AUTH_TOKEN);
- @Before
+ @BeforeEach
public void startMockOAuthServer() {
mockOAuthServer = startClientAndServer(OAUTH_PORT);
}
- @Before
+ @BeforeEach
public void startMockWebHDFSServer() {
System.setProperty("hadoop.home.dir", System.getProperty("user.dir"));
@@ -205,12 +205,12 @@ public Configuration getConfiguration() {
}
- @After
+ @AfterEach
public void stopMockWebHDFSServer() {
mockWebHDFS.stop();
}
- @After
+ @AfterEach
public void stopMockOAuthServer() {
mockOAuthServer.stop();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
index 5577bb6266486..ccd80cb8837ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
@@ -36,13 +36,15 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
+@Timeout(30)
public class TestWebHdfsContentLength {
private static ServerSocket listenSocket;
private static String bindAddr;
@@ -60,10 +62,7 @@ public class TestWebHdfsContentLength {
private static ExecutorService executor;
- @Rule
- public Timeout timeout = new Timeout(30000);
-
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
listenSocket = new ServerSocket();
listenSocket.bind(null);
@@ -79,7 +78,7 @@ public static void setup() throws IOException {
executor = Executors.newSingleThreadExecutor();
}
- @AfterClass
+ @AfterAll
public static void teardown() throws IOException {
if (listenSocket != null) {
listenSocket.close();
@@ -94,9 +93,9 @@ public void testGetOp() throws Exception {
Future future = contentLengthFuture(errResponse);
try {
fs.getFileStatus(p);
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals(null, getContentLength(future));
+ assertEquals(null, getContentLength(future));
}
@Test
@@ -106,11 +105,11 @@ public void testGetOpWithRedirect() {
Future future3 = contentLengthFuture(errResponse);
try {
fs.open(p).read();
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals(null, getContentLength(future1));
- Assert.assertEquals(null, getContentLength(future2));
- Assert.assertEquals(null, getContentLength(future3));
+ assertEquals(null, getContentLength(future1));
+ assertEquals(null, getContentLength(future2));
+ assertEquals(null, getContentLength(future3));
}
@Test
@@ -118,9 +117,9 @@ public void testPutOp() {
Future future = contentLengthFuture(errResponse);
try {
fs.mkdirs(p);
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals("0", getContentLength(future));
+ assertEquals("0", getContentLength(future));
}
@Test
@@ -131,10 +130,10 @@ public void testPutOpWithRedirect() {
FSDataOutputStream os = fs.create(p);
os.write(new byte[]{0});
os.close();
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals("0", getContentLength(future1));
- Assert.assertEquals("chunked", getContentLength(future2));
+ assertEquals("0", getContentLength(future1));
+ assertEquals("chunked", getContentLength(future2));
}
@Test
@@ -142,9 +141,9 @@ public void testPostOp() {
Future future = contentLengthFuture(errResponse);
try {
fs.concat(p, new Path[]{p});
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals("0", getContentLength(future));
+ assertEquals("0", getContentLength(future));
}
@Test
@@ -156,10 +155,10 @@ public void testPostOpWithRedirect() {
FSDataOutputStream os = fs.append(p);
os.write(new byte[]{0});
os.close();
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals("0", getContentLength(future1));
- Assert.assertEquals("chunked", getContentLength(future2));
+ assertEquals("0", getContentLength(future1));
+ assertEquals("chunked", getContentLength(future2));
}
@Test
@@ -167,9 +166,9 @@ public void testDelete() {
Future future = contentLengthFuture(errResponse);
try {
fs.delete(p, false);
- Assert.fail();
+ fail();
} catch (IOException ioe) {} // expected
- Assert.assertEquals(null, getContentLength(future));
+ assertEquals(null, getContentLength(future));
}
private String getContentLength(Future future) {
@@ -177,7 +176,7 @@ private String getContentLength(Future future) {
try {
request = future.get(2, TimeUnit.SECONDS);
} catch (Exception e) {
- Assert.fail(e.toString());
+ fail(e.toString());
}
Matcher matcher = contentLengthPattern.matcher(request);
return matcher.find() ? matcher.group(2) : null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
index c387b1ebceebe..f37db020fc6d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
@@ -19,11 +19,11 @@
package org.apache.hadoop.hdfs.web.oauth2;
import org.apache.hadoop.util.Timer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
index 9ae7ff88fb6b0..164fd0ca53235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.util.Timer;
import org.apache.http.HttpStatus;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockserver.client.MockServerClient;
import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.Header;
@@ -46,7 +46,7 @@
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockserver.integration.ClientAndServer.startClientAndServer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
index 3ef105ca246c4..c76251ed71427 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Timer;
import org.apache.http.HttpStatus;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockserver.client.MockServerClient;
import org.mockserver.integration.ClientAndServer;
import org.mockserver.model.Header;
@@ -46,7 +46,7 @@
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockserver.integration.ClientAndServer.startClientAndServer;