diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 1cbdf6d..e2a0b8e 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -1,28 +1,96 @@ name: Integration Tests on: - push: - branches: [ main ] pull_request: - branches: [ main ] - workflow_dispatch: # Allow manual triggering + branches: [main] + push: + branches: [main] + workflow_dispatch: + schedule: + - cron: "0 2 * * *" env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 + TEST_S3_ENDPOINT: http://localhost:9000 + TEST_S3_ACCESS_KEY: accesskey + TEST_S3_SECRET_KEY: secretkey jobs: - integration: - name: Integration Tests + smoke-latest: + name: Smoke (RustFS latest) + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + + - name: Start RustFS latest + run: | + docker run -d --name rustfs \ + -p 9000:9000 \ + -p 9001:9001 \ + -v rustfs-data:/data \ + -e RUSTFS_ROOT_USER=accesskey \ + -e RUSTFS_ROOT_PASSWORD=secretkey \ + -e RUSTFS_ACCESS_KEY=accesskey \ + -e RUSTFS_SECRET_KEY=secretkey \ + -e RUSTFS_VOLUMES=/data \ + -e RUSTFS_ADDRESS=":9000" \ + -e RUSTFS_CONSOLE_ENABLE="true" \ + -e RUSTFS_CONSOLE_ADDRESS=":9001" \ + rustfs/rustfs:latest + + - name: Wait for RustFS + run: | + for i in {1..60}; do + if curl -sf http://localhost:9000/health > /dev/null 2>&1; then + echo "RustFS is ready" + exit 0 + fi + sleep 1 + done + echo "RustFS failed to start" + docker logs rustfs + exit 1 + + - name: Run smoke compatibility tests + run: | + set -euo pipefail + TESTS=( + "object_operations::test_upload_and_download_small_file" + "object_operations::test_move_recursive_prefix_s3_to_s3" + "quota_operations::test_bucket_quota_set_info_clear" + ) + + for test_name in "${TESTS[@]}"; do + echo "==> Running $test_name" + cargo test \ + --package rustfs-cli \ + --test integration \ + --features integration \ + "$test_name" \ + -- \ + --exact \ + 
--test-threads=1 + done + + - name: Show RustFS logs on failure + if: failure() + run: docker logs rustfs 2>&1 | tail -200 + + full-latest: + name: Full Integration (RustFS latest) runs-on: ubuntu-latest - # Don't block PR merges - integration tests are supplementary - continue-on-error: true + timeout-minutes: 90 + if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' steps: - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - - name: Start RustFS + - name: Start RustFS latest run: | docker run -d --name rustfs \ -p 9000:9000 \ @@ -36,41 +104,32 @@ jobs: -e RUSTFS_ADDRESS=":9000" \ -e RUSTFS_CONSOLE_ENABLE="true" \ -e RUSTFS_CONSOLE_ADDRESS=":9001" \ - rustfs/rustfs:1.0.0-alpha.81 + rustfs/rustfs:latest - name: Wait for RustFS run: | - echo "Waiting for RustFS to start..." - sleep 3 - for i in {1..30}; do - # Try health endpoint + for i in {1..60}; do if curl -sf http://localhost:9000/health > /dev/null 2>&1; then - echo "RustFS is ready!" + echo "RustFS is ready" exit 0 fi - echo "Waiting for RustFS... 
($i/30)" - sleep 2 + sleep 1 done echo "RustFS failed to start" docker logs rustfs exit 1 - - name: Run integration tests + - name: Run full integration suite run: cargo test --package rustfs-cli --test integration --features integration -- --test-threads=1 - env: - TEST_S3_ENDPOINT: http://localhost:9000 - TEST_S3_ACCESS_KEY: accesskey - TEST_S3_SECRET_KEY: secretkey - name: Show RustFS logs on failure if: failure() - run: docker logs rustfs 2>&1 | tail -100 + run: docker logs rustfs 2>&1 | tail -200 golden: name: Golden Tests runs-on: ubuntu-latest - # Don't block PR merges - continue-on-error: true + timeout-minutes: 20 steps: - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable @@ -79,5 +138,4 @@ jobs: - name: Run golden tests run: cargo test --package rustfs-cli --test golden --features golden env: - # Golden tests use isolated config dir, no real S3 needed for alias tests RC_CONFIG_DIR: ${{ runner.temp }}/rc-test-config diff --git a/crates/cli/src/commands/mv.rs b/crates/cli/src/commands/mv.rs index d78a087..d91a994 100644 --- a/crates/cli/src/commands/mv.rs +++ b/crates/cli/src/commands/mv.rs @@ -3,7 +3,7 @@ //! Moves objects between locations (copy + delete). use clap::Args; -use rc_core::{AliasManager, ObjectStore as _, ParsedPath, RemotePath, parse_path}; +use rc_core::{AliasManager, ListOptions, ObjectStore as _, ParsedPath, RemotePath, parse_path}; use rc_s3::S3Client; use serde::Serialize; use std::path::{Path, PathBuf}; @@ -273,39 +273,168 @@ async fn move_s3_to_s3( return ExitCode::Success; } - // Copy - match client.copy_object(src, dst).await { - Ok(info) => { - // Delete source - if let Err(e) = client.delete_object(src).await { - formatter.error(&format!("Copied but failed to delete source: {e}")); - return ExitCode::GeneralError; - } + // Recursive move for prefix/directory semantics. 
+    if args.recursive {
+        let mut continuation_token: Option<String> = None;
+        let mut moved_count = 0usize;
+        let mut error_count = 0usize;
+        let src_prefix = src.key.clone();
+
+        loop {
+            let list_opts = ListOptions {
+                recursive: true,
+                continuation_token: continuation_token.clone(),
+                ..Default::default()
+            };
+
+            let list_result = match client.list_objects(src, list_opts).await {
+                Ok(result) => result,
+                Err(e) => {
+                    formatter.error(&format!("Failed to list source objects: {e}"));
+                    return ExitCode::NetworkError;
+                }
+            };
+
+            for item in &list_result.items {
+                if item.is_dir {
+                    continue;
+                }
+
+                let relative = if src_prefix.is_empty() {
+                    item.key.clone()
+                } else if let Some(rest) = item.key.strip_prefix(&src_prefix) {
+                    rest.trim_start_matches('/').to_string()
+                } else {
+                    item.key.clone()
+                };
-        if formatter.is_json() {
-            let output = MvOutput {
-                status: "success",
-                source: src_display,
-                target: dst_display,
-                size_bytes: info.size_bytes,
+                let target_key = if dst.key.is_empty() {
+                    relative.clone()
+                } else if dst.key.ends_with('/') {
+                    format!("{}{}", dst.key, relative)
+                } else {
+                    format!("{}/{}", dst.key, relative)
                 };
-            formatter.json(&output);
-        } else {
-            formatter.println(&format!(
-                "{src_display} -> {dst_display} ({})",
-                info.size_human.unwrap_or_default()
-            ));
+
+                let src_obj = RemotePath::new(&src.alias, &src.bucket, &item.key);
+                let dst_obj = RemotePath::new(&dst.alias, &dst.bucket, &target_key);
+                let src_obj_display = src_obj.to_string();
+                let dst_obj_display = dst_obj.to_string();
+
+                match client.copy_object(&src_obj, &dst_obj).await {
+                    Ok(_) => match client.delete_object(&src_obj).await {
+                        Ok(()) => {
+                            moved_count += 1;
+                            if !formatter.is_json() {
+                                formatter
+                                    .println(&format!("{src_obj_display} -> {dst_obj_display}"));
+                            }
+                        }
+                        Err(e) => {
+                            error_count += 1;
+                            formatter.error(&format!(
+                                "Copied but failed to delete source '{src_obj_display}': {e}"
+                            ));
+                            if !args.continue_on_error {
+                                return ExitCode::GeneralError;
+                            }
+                        }
+                    },
+ Err(e) => { + error_count += 1; + formatter.error(&format!( + "Failed to move '{src_obj_display}' -> '{dst_obj_display}': {e}" + )); + if !args.continue_on_error { + return ExitCode::NetworkError; + } + } + } + } + + if !list_result.truncated { + break; + } + continuation_token = match list_result.continuation_token.clone() { + Some(token) => Some(token), + None => { + formatter.error( + "Backend indicated truncated results but did not provide a continuation token; stopping to avoid an infinite loop.", + ); + return ExitCode::GeneralError; + } + }; + } + + if formatter.is_json() { + #[derive(Serialize)] + struct MvRecursiveOutput { + status: &'static str, + source: String, + target: String, + moved: usize, + errors: usize, } + + formatter.json(&MvRecursiveOutput { + status: if error_count == 0 { + "success" + } else { + "partial" + }, + source: src_display, + target: dst_display, + moved: moved_count, + errors: error_count, + }); + } else if error_count == 0 { + formatter.println(&format!("Moved {moved_count} object(s).")); + } else { + formatter.println(&format!( + "Moved {moved_count} object(s), {error_count} failed." 
+ )); + } + + if error_count == 0 { ExitCode::Success + } else { + ExitCode::GeneralError } - Err(e) => { - let err_str = e.to_string(); - if err_str.contains("NotFound") || err_str.contains("NoSuchKey") { - formatter.error(&format!("Source not found: {src_display}")); - ExitCode::NotFound - } else { - formatter.error(&format!("Failed to move: {e}")); - ExitCode::NetworkError + } else { + // Copy + match client.copy_object(src, dst).await { + Ok(info) => { + // Delete source + if let Err(e) = client.delete_object(src).await { + formatter.error(&format!("Copied but failed to delete source: {e}")); + return ExitCode::GeneralError; + } + + if formatter.is_json() { + let output = MvOutput { + status: "success", + source: src_display, + target: dst_display, + size_bytes: info.size_bytes, + }; + formatter.json(&output); + } else { + formatter.println(&format!( + "{src_display} -> {dst_display} ({})", + info.size_human.unwrap_or_default() + )); + } + ExitCode::Success + } + Err(e) => { + let err_str = e.to_string(); + if err_str.contains("NotFound") || err_str.contains("NoSuchKey") { + formatter.error(&format!("Source not found: {src_display}")); + ExitCode::NotFound + } else { + formatter.error(&format!("Failed to move: {e}")); + ExitCode::NetworkError + } } } } diff --git a/crates/cli/tests/integration.rs b/crates/cli/tests/integration.rs index 3ce48ca..db209e7 100644 --- a/crates/cli/tests/integration.rs +++ b/crates/cli/tests/integration.rs @@ -11,7 +11,7 @@ //! -e RUSTFS_ROOT_PASSWORD=secretkey \ //! -e RUSTFS_ACCESS_KEY=accesskey \ //! -e RUSTFS_SECRET_KEY=secretkey \ -//! rustfs/rustfs:1.0.0-alpha.81 +//! rustfs/rustfs:latest //! //! # Run tests //! 
cargo test --features integration @@ -656,6 +656,88 @@ mod object_operations { cleanup_bucket(config_dir.path(), &bucket_name); } + #[test] + fn test_move_recursive_prefix_s3_to_s3() { + let (config_dir, bucket_name) = match setup_with_alias("mvrec") { + Some(v) => v, + None => { + eprintln!("Skipping: S3 test config not available"); + return; + } + }; + + let source_files = ["src/dir/a.txt", "src/dir/sub/b.txt"]; + for key in &source_files { + let tmp = tempfile::NamedTempFile::new().expect("Failed to create temp file"); + std::fs::write(tmp.path(), format!("content for {}", key)).expect("Failed to write"); + let output = run_rc( + &[ + "cp", + tmp.path().to_str().unwrap(), + &format!("test/{}/{}", bucket_name, key), + ], + config_dir.path(), + ); + assert!(output.status.success(), "Failed to upload {}", key); + } + + let output = run_rc( + &[ + "mv", + "--recursive", + "--continue-on-error", + &format!("test/{}/src/", bucket_name), + &format!("test/{}/dst/", bucket_name), + "--json", + ], + config_dir.path(), + ); + assert!( + output.status.success(), + "Failed to recursive move: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let stdout = String::from_utf8_lossy(&output.stdout); + let json: serde_json::Value = serde_json::from_str(&stdout).expect("Invalid JSON output"); + assert_eq!(json["status"], "success"); + assert_eq!(json["errors"], 0); + assert_eq!(json["moved"], 2); + + let output = run_rc( + &[ + "ls", + "--recursive", + &format!("test/{}/", bucket_name), + "--json", + ], + config_dir.path(), + ); + assert!( + output.status.success(), + "Failed to list bucket after recursive move" + ); + let listing = String::from_utf8_lossy(&output.stdout); + assert!( + listing.contains("dst/dir/a.txt"), + "Moved object dst/dir/a.txt not found" + ); + assert!( + listing.contains("dst/dir/sub/b.txt"), + "Moved object dst/dir/sub/b.txt not found" + ); + assert!( + !listing.contains("src/dir/a.txt"), + "Source object src/dir/a.txt should be removed" + ); + 
assert!( + !listing.contains("src/dir/sub/b.txt"), + "Source object src/dir/sub/b.txt should be removed" + ); + + cleanup_bucket(config_dir.path(), &bucket_name); + } + #[test] fn test_delete_object() { let (config_dir, bucket_name) = match setup_with_alias("delete") { @@ -2315,6 +2397,65 @@ mod tag_operations { } } +mod quota_operations { + use super::*; + + #[test] + fn test_bucket_quota_set_info_clear() { + let (config_dir, bucket_name) = match setup_with_alias("quota") { + Some(v) => v, + None => { + eprintln!("Skipping: S3 test config not available"); + return; + } + }; + + let bucket_path = format!("test/{}", bucket_name); + + let output = run_rc( + &["quota", "set", &bucket_path, "64MiB", "--json"], + config_dir.path(), + ); + assert!( + output.status.success(), + "Failed to set quota: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let output = run_rc( + &["quota", "info", &bucket_path, "--json"], + config_dir.path(), + ); + assert!( + output.status.success(), + "Failed to get quota info: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8_lossy(&output.stdout); + let json: serde_json::Value = serde_json::from_str(&stdout).expect("Invalid JSON output"); + assert_eq!(json["bucket"], bucket_name); + assert_eq!(json["quota"], 64 * 1024 * 1024); + assert_eq!(json["quotaType"], "HARD"); + + let output = run_rc( + &["quota", "clear", &bucket_path, "--json"], + config_dir.path(), + ); + assert!( + output.status.success(), + "Failed to clear quota: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8_lossy(&output.stdout); + let json: serde_json::Value = serde_json::from_str(&stdout).expect("Invalid JSON output"); + assert_eq!(json["bucket"], bucket_name); + assert!(json["quota"].is_null()); + assert_eq!(json["quotaType"], "HARD"); + + cleanup_bucket(config_dir.path(), &bucket_name); + } +} + mod alias_operations { use super::*; diff --git a/crates/core/src/admin/types.rs 
b/crates/core/src/admin/types.rs index 54b4735..23d1df5 100644 --- a/crates/core/src/admin/types.rs +++ b/crates/core/src/admin/types.rs @@ -350,9 +350,14 @@ pub struct BucketQuota { pub size: u64, /// Quota type (currently only HARD) + #[serde(default = "default_quota_type")] pub quota_type: String, } +fn default_quota_type() -> String { + "HARD".to_string() +} + #[cfg(test)] mod tests { use super::*; @@ -540,4 +545,15 @@ mod tests { assert_eq!(decoded.bucket, "my-bucket"); assert_eq!(decoded.quota, Some(1024)); } + + #[test] + fn test_bucket_quota_defaults_quota_type_when_missing() { + let json = r#"{"bucket":"my-bucket","quota":1024,"size":512}"#; + let decoded: BucketQuota = serde_json::from_str(json).unwrap(); + + assert_eq!(decoded.bucket, "my-bucket"); + assert_eq!(decoded.quota, Some(1024)); + assert_eq!(decoded.size, 512); + assert_eq!(decoded.quota_type, "HARD"); + } } diff --git a/crates/s3/src/client.rs b/crates/s3/src/client.rs index 671a2ac..6f0e6cd 100644 --- a/crates/s3/src/client.rs +++ b/crates/s3/src/client.rs @@ -19,7 +19,9 @@ use rc_core::{ }; use tokio::io::AsyncReadExt; -const SINGLE_PUT_OBJECT_MAX_SIZE: u64 = 5 * 1024 * 1024 * 1024; +/// Keep single-part uploads small to avoid backend incompatibilities with +/// streaming aws-chunked payloads. +const SINGLE_PUT_OBJECT_MAX_SIZE: u64 = crate::multipart::DEFAULT_PART_SIZE; /// Custom HTTP connector using reqwest, supporting insecure TLS (skip cert verification) /// and custom CA bundles. Used when `alias.insecure = true` or `alias.ca_bundle.is_some()`. @@ -196,6 +198,14 @@ impl S3Client { // Build S3 client with path-style addressing for compatibility let s3_config = aws_sdk_s3::config::Builder::from(&config) .force_path_style(alias.bucket_lookup == "path" || alias.bucket_lookup == "auto") + // Improve compatibility with S3-compatible backends by only sending request + // checksums when the operation explicitly requires them. 
+ .request_checksum_calculation( + aws_sdk_s3::config::RequestChecksumCalculation::WhenRequired, + ) + .response_checksum_validation( + aws_sdk_s3::config::ResponseChecksumValidation::WhenRequired, + ) .build(); let client = aws_sdk_s3::Client::from_conf(s3_config); @@ -270,11 +280,10 @@ impl S3Client { content_type: Option<&str>, file_size: u64, ) -> Result { - let body = aws_sdk_s3::primitives::ByteStream::read_from() - .path(file_path) - .build() + let data = tokio::fs::read(file_path) .await - .map_err(|e| Error::General(format!("build request body: {e}")))?; + .map_err(|e| Error::General(format!("read file '{}': {e}", file_path.display())))?; + let body = aws_sdk_s3::primitives::ByteStream::from(data); let mut request = self .inner @@ -1188,7 +1197,7 @@ mod tests { assert!(!S3Client::should_use_multipart(0)); assert!(!S3Client::should_use_multipart(1024 * 1024)); assert!(!S3Client::should_use_multipart( - crate::multipart::DEFAULT_PART_SIZE + 1 + crate::multipart::DEFAULT_PART_SIZE )); assert!(!S3Client::should_use_multipart(SINGLE_PUT_OBJECT_MAX_SIZE)); } diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 5b3d871..3db8d82 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -7,7 +7,7 @@ services: # RustFS - S3-compatible object storage (Tier 1) rustfs: - image: rustfs/rustfs:1.0.0-alpha.81 + image: rustfs/rustfs:latest container_name: rc-test-rustfs ports: - "9000:9000" # S3 API