Skip to content

Commit a796325

Browse files
authored
fix: ensure S3 and GCS integ tests are conditionally compiled only when the storage-s3 and storage-gcs features are enabled (#552)
1 parent cb3c8be commit a796325

File tree

2 files changed

+178
-173
lines changed

2 files changed

+178
-173
lines changed

crates/iceberg/tests/file_io_gcs_test.rs

Lines changed: 108 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -17,109 +17,112 @@
1717

1818
//! Integration tests for FileIO Google Cloud Storage (GCS).
1919
20-
use std::collections::HashMap;
21-
use std::net::SocketAddr;
22-
use std::sync::RwLock;
23-
24-
use bytes::Bytes;
25-
use ctor::{ctor, dtor};
26-
use iceberg::io::{FileIO, FileIOBuilder, GCS_NO_AUTH, GCS_SERVICE_PATH};
27-
use iceberg_test_utils::docker::DockerCompose;
28-
use iceberg_test_utils::{normalize_test_name, set_up};
29-
30-
static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
31-
static FAKE_GCS_PORT: u16 = 4443;
32-
static FAKE_GCS_BUCKET: &str = "test-bucket";
33-
34-
#[ctor]
35-
fn before_all() {
36-
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
37-
let docker_compose = DockerCompose::new(
38-
normalize_test_name(module_path!()),
39-
format!("{}/testdata/file_io_gcs", env!("CARGO_MANIFEST_DIR")),
40-
);
41-
docker_compose.run();
42-
guard.replace(docker_compose);
43-
}
44-
45-
#[dtor]
46-
fn after_all() {
47-
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
48-
guard.take();
49-
}
50-
51-
async fn get_file_io_gcs() -> FileIO {
52-
set_up();
53-
54-
let ip = DOCKER_COMPOSE_ENV
55-
.read()
56-
.unwrap()
57-
.as_ref()
58-
.unwrap()
59-
.get_container_ip("gcs-server");
60-
let addr = SocketAddr::new(ip, FAKE_GCS_PORT);
61-
62-
// A bucket must exist for FileIO
63-
create_bucket(FAKE_GCS_BUCKET, addr.to_string())
64-
.await
65-
.unwrap();
66-
67-
FileIOBuilder::new("gcs")
68-
.with_props(vec![
69-
(GCS_SERVICE_PATH, format!("http://{}", addr)),
70-
(GCS_NO_AUTH, "true".to_string()),
71-
])
72-
.build()
73-
.unwrap()
74-
}
75-
76-
// Create a bucket against the emulated GCS storage server.
77-
async fn create_bucket(name: &str, server_addr: String) -> anyhow::Result<()> {
78-
let mut bucket_data = HashMap::new();
79-
bucket_data.insert("name", name);
80-
81-
let client = reqwest::Client::new();
82-
let endpoint = format!("http://{}/storage/v1/b", server_addr);
83-
client.post(endpoint).json(&bucket_data).send().await?;
84-
Ok(())
85-
}
86-
87-
fn get_gs_path() -> String {
88-
format!("gs://{}", FAKE_GCS_BUCKET)
89-
}
90-
91-
#[tokio::test]
92-
async fn gcs_exists() {
93-
let file_io = get_file_io_gcs().await;
94-
assert!(file_io
95-
.is_exist(format!("{}/", get_gs_path()))
96-
.await
97-
.unwrap());
98-
}
99-
100-
#[tokio::test]
101-
async fn gcs_write() {
102-
let gs_file = format!("{}/write-file", get_gs_path());
103-
let file_io = get_file_io_gcs().await;
104-
let output = file_io.new_output(&gs_file).unwrap();
105-
output
106-
.write(bytes::Bytes::from_static(b"iceberg-gcs!"))
107-
.await
108-
.expect("Write to test output file");
109-
assert!(file_io.is_exist(gs_file).await.unwrap())
110-
}
111-
112-
#[tokio::test]
113-
async fn gcs_read() {
114-
let gs_file = format!("{}/read-gcs", get_gs_path());
115-
let file_io = get_file_io_gcs().await;
116-
let output = file_io.new_output(&gs_file).unwrap();
117-
output
118-
.write(bytes::Bytes::from_static(b"iceberg!"))
119-
.await
120-
.expect("Write to test output file");
121-
assert!(file_io.is_exist(&gs_file).await.unwrap());
122-
123-
let input = file_io.new_input(gs_file).unwrap();
124-
assert_eq!(input.read().await.unwrap(), Bytes::from_static(b"iceberg!"));
20+
#[cfg(all(test, feature = "storage-gcs"))]
21+
mod tests {
22+
use std::collections::HashMap;
23+
use std::net::SocketAddr;
24+
use std::sync::RwLock;
25+
26+
use bytes::Bytes;
27+
use ctor::{ctor, dtor};
28+
use iceberg::io::{FileIO, FileIOBuilder, GCS_NO_AUTH, GCS_SERVICE_PATH};
29+
use iceberg_test_utils::docker::DockerCompose;
30+
use iceberg_test_utils::{normalize_test_name, set_up};
31+
32+
static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
33+
static FAKE_GCS_PORT: u16 = 4443;
34+
static FAKE_GCS_BUCKET: &str = "test-bucket";
35+
36+
#[ctor]
37+
fn before_all() {
38+
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
39+
let docker_compose = DockerCompose::new(
40+
normalize_test_name(module_path!()),
41+
format!("{}/testdata/file_io_gcs", env!("CARGO_MANIFEST_DIR")),
42+
);
43+
docker_compose.run();
44+
guard.replace(docker_compose);
45+
}
46+
47+
#[dtor]
48+
fn after_all() {
49+
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
50+
guard.take();
51+
}
52+
53+
async fn get_file_io_gcs() -> FileIO {
54+
set_up();
55+
56+
let ip = DOCKER_COMPOSE_ENV
57+
.read()
58+
.unwrap()
59+
.as_ref()
60+
.unwrap()
61+
.get_container_ip("gcs-server");
62+
let addr = SocketAddr::new(ip, FAKE_GCS_PORT);
63+
64+
// A bucket must exist for FileIO
65+
create_bucket(FAKE_GCS_BUCKET, addr.to_string())
66+
.await
67+
.unwrap();
68+
69+
FileIOBuilder::new("gcs")
70+
.with_props(vec![
71+
(GCS_SERVICE_PATH, format!("http://{}", addr)),
72+
(GCS_NO_AUTH, "true".to_string()),
73+
])
74+
.build()
75+
.unwrap()
76+
}
77+
78+
// Create a bucket against the emulated GCS storage server.
79+
async fn create_bucket(name: &str, server_addr: String) -> anyhow::Result<()> {
80+
let mut bucket_data = HashMap::new();
81+
bucket_data.insert("name", name);
82+
83+
let client = reqwest::Client::new();
84+
let endpoint = format!("http://{}/storage/v1/b", server_addr);
85+
client.post(endpoint).json(&bucket_data).send().await?;
86+
Ok(())
87+
}
88+
89+
fn get_gs_path() -> String {
90+
format!("gs://{}", FAKE_GCS_BUCKET)
91+
}
92+
93+
#[tokio::test]
94+
async fn gcs_exists() {
95+
let file_io = get_file_io_gcs().await;
96+
assert!(file_io
97+
.is_exist(format!("{}/", get_gs_path()))
98+
.await
99+
.unwrap());
100+
}
101+
102+
#[tokio::test]
103+
async fn gcs_write() {
104+
let gs_file = format!("{}/write-file", get_gs_path());
105+
let file_io = get_file_io_gcs().await;
106+
let output = file_io.new_output(&gs_file).unwrap();
107+
output
108+
.write(bytes::Bytes::from_static(b"iceberg-gcs!"))
109+
.await
110+
.expect("Write to test output file");
111+
assert!(file_io.is_exist(gs_file).await.unwrap())
112+
}
113+
114+
#[tokio::test]
115+
async fn gcs_read() {
116+
let gs_file = format!("{}/read-gcs", get_gs_path());
117+
let file_io = get_file_io_gcs().await;
118+
let output = file_io.new_output(&gs_file).unwrap();
119+
output
120+
.write(bytes::Bytes::from_static(b"iceberg!"))
121+
.await
122+
.expect("Write to test output file");
123+
assert!(file_io.is_exist(&gs_file).await.unwrap());
124+
125+
let input = file_io.new_input(gs_file).unwrap();
126+
assert_eq!(input.read().await.unwrap(), Bytes::from_static(b"iceberg!"));
127+
}
125128
}

crates/iceberg/tests/file_io_s3_test.rs

Lines changed: 70 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -16,86 +16,88 @@
1616
// under the License.
1717

1818
//! Integration tests for FileIO S3.
19+
#[cfg(all(test, feature = "storage-s3"))]
20+
mod tests {
21+
use std::net::SocketAddr;
22+
use std::sync::RwLock;
1923

20-
use std::net::SocketAddr;
21-
use std::sync::RwLock;
24+
use ctor::{ctor, dtor};
25+
use iceberg::io::{
26+
FileIO, FileIOBuilder, S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY,
27+
};
28+
use iceberg_test_utils::docker::DockerCompose;
29+
use iceberg_test_utils::{normalize_test_name, set_up};
2230

23-
use ctor::{ctor, dtor};
24-
use iceberg::io::{
25-
FileIO, FileIOBuilder, S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY,
26-
};
27-
use iceberg_test_utils::docker::DockerCompose;
28-
use iceberg_test_utils::{normalize_test_name, set_up};
31+
const MINIO_PORT: u16 = 9000;
32+
static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
2933

30-
const MINIO_PORT: u16 = 9000;
31-
static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
32-
33-
#[ctor]
34-
fn before_all() {
35-
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
36-
let docker_compose = DockerCompose::new(
37-
normalize_test_name(module_path!()),
38-
format!("{}/testdata/file_io_s3", env!("CARGO_MANIFEST_DIR")),
39-
);
40-
docker_compose.run();
41-
guard.replace(docker_compose);
42-
}
34+
#[ctor]
35+
fn before_all() {
36+
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
37+
let docker_compose = DockerCompose::new(
38+
normalize_test_name(module_path!()),
39+
format!("{}/testdata/file_io_s3", env!("CARGO_MANIFEST_DIR")),
40+
);
41+
docker_compose.run();
42+
guard.replace(docker_compose);
43+
}
4344

44-
#[dtor]
45-
fn after_all() {
46-
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
47-
guard.take();
48-
}
45+
#[dtor]
46+
fn after_all() {
47+
let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
48+
guard.take();
49+
}
4950

50-
async fn get_file_io() -> FileIO {
51-
set_up();
51+
async fn get_file_io() -> FileIO {
52+
set_up();
5253

53-
let guard = DOCKER_COMPOSE_ENV.read().unwrap();
54-
let docker_compose = guard.as_ref().unwrap();
55-
let container_ip = docker_compose.get_container_ip("minio");
56-
let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
54+
let guard = DOCKER_COMPOSE_ENV.read().unwrap();
55+
let docker_compose = guard.as_ref().unwrap();
56+
let container_ip = docker_compose.get_container_ip("minio");
57+
let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
5758

58-
FileIOBuilder::new("s3")
59-
.with_props(vec![
60-
(S3_ENDPOINT, format!("http://{}", minio_socket_addr)),
61-
(S3_ACCESS_KEY_ID, "admin".to_string()),
62-
(S3_SECRET_ACCESS_KEY, "password".to_string()),
63-
(S3_REGION, "us-east-1".to_string()),
64-
])
65-
.build()
66-
.unwrap()
67-
}
68-
69-
#[tokio::test]
70-
async fn test_file_io_s3_is_exist() {
71-
let file_io = get_file_io().await;
72-
assert!(!file_io.is_exist("s3://bucket2/any").await.unwrap());
73-
assert!(file_io.is_exist("s3://bucket1/").await.unwrap());
74-
}
59+
FileIOBuilder::new("s3")
60+
.with_props(vec![
61+
(S3_ENDPOINT, format!("http://{}", minio_socket_addr)),
62+
(S3_ACCESS_KEY_ID, "admin".to_string()),
63+
(S3_SECRET_ACCESS_KEY, "password".to_string()),
64+
(S3_REGION, "us-east-1".to_string()),
65+
])
66+
.build()
67+
.unwrap()
68+
}
7569

76-
#[tokio::test]
77-
async fn test_file_io_s3_output() {
78-
let file_io = get_file_io().await;
79-
assert!(!file_io.is_exist("s3://bucket1/test_output").await.unwrap());
80-
let output_file = file_io.new_output("s3://bucket1/test_output").unwrap();
81-
{
82-
output_file.write("123".into()).await.unwrap();
70+
#[tokio::test]
71+
async fn test_file_io_s3_is_exist() {
72+
let file_io = get_file_io().await;
73+
assert!(!file_io.is_exist("s3://bucket2/any").await.unwrap());
74+
assert!(file_io.is_exist("s3://bucket1/").await.unwrap());
8375
}
84-
assert!(file_io.is_exist("s3://bucket1/test_output").await.unwrap());
85-
}
8676

87-
#[tokio::test]
88-
async fn test_file_io_s3_input() {
89-
let file_io = get_file_io().await;
90-
let output_file = file_io.new_output("s3://bucket1/test_input").unwrap();
91-
{
92-
output_file.write("test_input".into()).await.unwrap();
77+
#[tokio::test]
78+
async fn test_file_io_s3_output() {
79+
let file_io = get_file_io().await;
80+
assert!(!file_io.is_exist("s3://bucket1/test_output").await.unwrap());
81+
let output_file = file_io.new_output("s3://bucket1/test_output").unwrap();
82+
{
83+
output_file.write("123".into()).await.unwrap();
84+
}
85+
assert!(file_io.is_exist("s3://bucket1/test_output").await.unwrap());
9386
}
9487

95-
let input_file = file_io.new_input("s3://bucket1/test_input").unwrap();
88+
#[tokio::test]
89+
async fn test_file_io_s3_input() {
90+
let file_io = get_file_io().await;
91+
let output_file = file_io.new_output("s3://bucket1/test_input").unwrap();
92+
{
93+
output_file.write("test_input".into()).await.unwrap();
94+
}
95+
96+
let input_file = file_io.new_input("s3://bucket1/test_input").unwrap();
9697

97-
{
98-
let buffer = input_file.read().await.unwrap();
99-
assert_eq!(buffer, "test_input".as_bytes());
98+
{
99+
let buffer = input_file.read().await.unwrap();
100+
assert_eq!(buffer, "test_input".as_bytes());
101+
}
100102
}
101103
}

0 commit comments

Comments (0)