diff --git a/docs/reference/config.md b/docs/reference/config.md index d3922d304c..ff808d536c 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -223,6 +223,9 @@ The following settings are available: : The maximum number of retry attempts for failed retryable requests (default: `-1`). `aws.client.protocol` +: :::{deprecated} 25.06.0-edge + This option is no longer supported. + ::: : The protocol to use when connecting to AWS. Can be `http` or `https` (default: `'https'`). `aws.client.proxyHost` @@ -231,6 +234,11 @@ The following settings are available: `aws.client.proxyPort` : The port to use when connecting through a proxy. +`aws.client.proxyScheme` +: :::{versionadded} 25.06.0-edge + ::: +: The protocol scheme to use when connecting through a proxy. Can be `http` or `https` (default: `'http'`). + `aws.client.proxyUsername` : The user name to use when connecting through a proxy. @@ -249,9 +257,15 @@ The following settings are available: : The name of the signature algorithm to use for signing requests made by the client. `aws.client.socketSendBufferSizeHint` +: :::{deprecated} 25.06.0-edge + This option is no longer supported. + ::: : The size hint (in bytes) for the low-level TCP send buffer (default: `0`). `aws.client.socketRecvBufferSizeHint` +: :::{deprecated} 25.06.0-edge + This option is no longer supported. + ::: : The size hint (in bytes) for the low-level TCP receive buffer (default: `0`). `aws.client.socketTimeout` diff --git a/modules/nf-commons/src/main/nextflow/file/FileHelper.groovy b/modules/nf-commons/src/main/nextflow/file/FileHelper.groovy index 889970061a..83df9c6034 100644 --- a/modules/nf-commons/src/main/nextflow/file/FileHelper.groovy +++ b/modules/nf-commons/src/main/nextflow/file/FileHelper.groovy @@ -880,7 +880,9 @@ class FileHelper { @Override FileVisitResult visitFile(Path fullPath, BasicFileAttributes attrs) throws IOException { - final path = folder.relativize(fullPath) + final path = fullPath.isAbsolute() + ? folder.relativize(fullPath) + : fullPath log.trace "visitFiles > file=$path; includeFile=$includeFile; matches=${matcher.matches(path)}; isRegularFile=${attrs.isRegularFile()}" if (includeFile && matcher.matches(path) && (attrs.isRegularFile() || (options.followLinks == false && attrs.isSymbolicLink())) && (includeHidden || !isHidden(fullPath))) { @@ -912,7 +914,9 @@ class FileHelper { } static protected Path relativize0(Path folder, Path fullPath) { - def result = folder.relativize(fullPath) + final result = fullPath.isAbsolute() + ? folder.relativize(fullPath) + : fullPath String str if( folder.is(FileSystems.default) || !(str=result.toString()).endsWith('/') ) return result diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java index a4b1f85dc4..91918c6bbc 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java +++ b/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java @@ -79,6 +79,12 @@ The protocol (i.e. HTTP or HTTPS) to use when connecting to AWS. """) public int proxyPort; + @ConfigOption + @Description(""" + The protocol scheme to use when connecting through a proxy. Can be `http` or `https` (default: `http`). + """) + public String proxyScheme; + @ConfigOption @Description(""" The user name to use when connecting through a proxy. 
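For context, the new `aws.client.proxyScheme` option slots into the existing `aws.client` proxy settings roughly as sketched below — an illustrative `nextflow.config` snippet, where the host, port, and credential values are hypothetical placeholders:

```groovy
// nextflow.config -- illustrative values only
aws {
    client {
        proxyHost     = 'proxy.example.com'   // hypothetical proxy host
        proxyPort     = 3128                  // hypothetical proxy port
        proxyScheme   = 'https'               // new in 25.06.0-edge; defaults to 'http'
        proxyUsername = 'nxf-user'            // hypothetical credentials
        proxyPassword = 'change-me'
    }
}
```
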
diff --git a/plugins/nf-amazon/build.gradle b/plugins/nf-amazon/build.gradle index 46bc2c7b9d..9059e617ea 100644 --- a/plugins/nf-amazon/build.gradle +++ b/plugins/nf-amazon/build.gradle @@ -38,17 +38,20 @@ dependencies { compileOnly 'org.pf4j:pf4j:3.12.0' api ('javax.xml.bind:jaxb-api:2.4.0-b180830.0359') - api ('com.amazonaws:aws-java-sdk-s3:1.12.777') - api ('com.amazonaws:aws-java-sdk-ec2:1.12.777') - api ('com.amazonaws:aws-java-sdk-batch:1.12.777') - api ('com.amazonaws:aws-java-sdk-iam:1.12.777') - api ('com.amazonaws:aws-java-sdk-ecs:1.12.777') - api ('com.amazonaws:aws-java-sdk-logs:1.12.777') - api ('com.amazonaws:aws-java-sdk-codecommit:1.12.777') - api ('com.amazonaws:aws-java-sdk-sts:1.12.777') - api ('com.amazonaws:aws-java-sdk-ses:1.12.777') - api ('software.amazon.awssdk:sso:2.26.26') - api ('software.amazon.awssdk:ssooidc:2.26.26') + api ('software.amazon.awssdk:s3:2.31.64') + api ('software.amazon.awssdk:ec2:2.31.64') + api ('software.amazon.awssdk:batch:2.31.64') + api ('software.amazon.awssdk:iam:2.31.64') + api ('software.amazon.awssdk:ecs:2.31.64') + api ('software.amazon.awssdk:cloudwatchlogs:2.31.64') + api ('software.amazon.awssdk:codecommit:2.31.64') + api ('software.amazon.awssdk:sts:2.31.64') + api ('software.amazon.awssdk:ses:2.31.64') + api ('software.amazon.awssdk:sso:2.31.64') + api ('software.amazon.awssdk:ssooidc:2.31.64') + api ('software.amazon.awssdk:s3-transfer-manager:2.31.64') + api ('software.amazon.awssdk:apache-client:2.31.64') + api ('software.amazon.awssdk:aws-crt-client:2.31.64') constraints { api 'com.fasterxml.jackson.core:jackson-databind:2.12.7.1' diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/AmazonPlugin.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/AmazonPlugin.groovy index 6cd167bf2d..51533a26e4 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/AmazonPlugin.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/AmazonPlugin.groovy @@ -35,8 +35,6 @@ class AmazonPlugin extends BasePlugin { @Override void start() { super.start() - // disable aws sdk v1 warning - System.setProperty("aws.java.v1.disableDeprecationAnnouncement", "true") FileHelper.getOrInstallProvider(S3FileSystemProvider) } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/AwsClientFactory.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/AwsClientFactory.groovy index 2676104863..1be1b16c62 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/AwsClientFactory.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/AwsClientFactory.groovy @@ -16,46 +16,36 @@ package nextflow.cloud.aws -import com.amazonaws.AmazonClientException -import com.amazonaws.ClientConfiguration -import com.amazonaws.auth.AWSCredentialsProvider -import com.amazonaws.auth.AWSCredentialsProviderChain -import com.amazonaws.auth.AWSStaticCredentialsProvider -import com.amazonaws.auth.AnonymousAWSCredentials -import com.amazonaws.auth.BasicAWSCredentials -import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper -import com.amazonaws.auth.EnvironmentVariableCredentialsProvider -import com.amazonaws.auth.SystemPropertiesCredentialsProvider -import com.amazonaws.auth.WebIdentityTokenCredentialsProvider -import com.amazonaws.auth.profile.ProfileCredentialsProvider -import com.amazonaws.auth.profile.ProfilesConfigFile -import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration -import com.amazonaws.profile.path.AwsProfileFileLocationProvider -import com.amazonaws.regions.InstanceMetadataRegionProvider -import 
com.amazonaws.regions.Region -import com.amazonaws.regions.RegionUtils -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.AWSBatchClient -import com.amazonaws.services.batch.AWSBatchClientBuilder -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.AmazonEC2Client -import com.amazonaws.services.ec2.AmazonEC2ClientBuilder -import com.amazonaws.services.ecs.AmazonECS -import com.amazonaws.services.ecs.AmazonECSClientBuilder -import com.amazonaws.services.logs.AWSLogs -import com.amazonaws.services.logs.AWSLogsAsyncClientBuilder -import com.amazonaws.services.s3.AmazonS3 -import com.amazonaws.services.s3.AmazonS3ClientBuilder -import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder -import com.amazonaws.services.securitytoken.model.GetCallerIdentityRequest +import nextflow.cloud.aws.nio.util.S3AsyncClientConfiguration +import nextflow.cloud.aws.nio.util.S3SyncClientConfiguration +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration +import software.amazon.awssdk.core.exception.SdkClientException +import software.amazon.awssdk.core.exception.SdkException +import software.amazon.awssdk.http.SdkHttpClient +import software.amazon.awssdk.http.async.SdkAsyncHttpClient +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.regions.providers.InstanceProfileRegionProvider +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient +import software.amazon.awssdk.services.ec2.Ec2Client +import software.amazon.awssdk.services.ecs.EcsClient +import software.amazon.awssdk.services.s3.S3AsyncClient +import software.amazon.awssdk.services.s3.S3Client +import software.amazon.awssdk.services.s3.S3Configuration +import software.amazon.awssdk.services.sts.StsClient +import software.amazon.awssdk.services.sts.model.GetCallerIdentityRequest import groovy.transform.CompileStatic import groovy.transform.Memoized import groovy.util.logging.Slf4j import nextflow.SysEnv import nextflow.cloud.aws.config.AwsConfig -import nextflow.cloud.aws.util.ConfigParser import nextflow.cloud.aws.util.S3CredentialsProvider -import nextflow.cloud.aws.util.SsoCredentialsProviderV1 import nextflow.exception.AbortOperationException /** * Implement a factory class for AWS client objects @@ -137,10 +127,10 @@ class AwsClientFactory { */ protected String fetchIamRole() { try { - def stsClient = AWSSecurityTokenServiceClientBuilder.defaultClient(); - return stsClient.getCallerIdentity(new GetCallerIdentityRequest()).getArn() + final stsClient = StsClient.create() + return stsClient.getCallerIdentity(GetCallerIdentityRequest.builder().build() as GetCallerIdentityRequest).arn(); } - catch( AmazonClientException e ) { + catch (SdkException e) { log.trace "Unable to fetch IAM credentials -- Cause: ${e.message}" return null } @@ -156,11 +146,11 @@ class AwsClientFactory { */ private String fetchRegion() { try { - return new InstanceMetadataRegionProvider().getRegion() + return new 
InstanceProfileRegionProvider().getRegion().id(); } - catch (AmazonClientException e) { - log.debug("Cannot fetch AWS region", e as Throwable) - return null + catch (SdkClientException e) { + log.debug("Cannot fetch AWS region", e); + return null; } } @@ -171,148 +161,125 @@ class AwsClientFactory { * @return A {@link Region} corresponding to the specified region string */ private Region getRegionObj(String region) { - final result = RegionUtils.getRegion(region) + final result = Region.of(region) if( !result ) throw new IllegalArgumentException("Not a valid AWS region name: $region"); return result } /** - * Gets or lazily creates an {@link AmazonEC2Client} instance given the current + * Gets or lazily creates an {@link Ec2Client} instance given the current * configuration parameter * * @return - * An {@link AmazonEC2Client} instance + * An {@link Ec2Client} instance */ - synchronized AmazonEC2 getEc2Client() { - - final builder = AmazonEC2ClientBuilder - .standard() - .withRegion(region) - - final credentials = getCredentialsProvider0() - if( credentials ) - builder.withCredentials(credentials) - - return builder.build() + synchronized Ec2Client getEc2Client() { + return Ec2Client.builder() + .region(getRegionObj(region)) + .credentialsProvider(getCredentialsProvider0()) + .build() } /** - * Gets or lazily creates an {@link AWSBatchClient} instance given the current + * Gets or lazily creates a {@link BatchClient} instance given the current * configuration parameter * * @return - * An {@link AWSBatchClient} instance + * A {@link BatchClient} instance */ @Memoized - AWSBatch getBatchClient() { - final builder = AWSBatchClientBuilder - .standard() - .withRegion(region) - - final credentials = getCredentialsProvider0() - if( credentials ) - builder.withCredentials(credentials) - - return builder.build() + BatchClient getBatchClient() { + return BatchClient.builder() + .region(getRegionObj(region)) + .credentialsProvider(getCredentialsProvider0()) + .build() } @Memoized - AmazonECS getEcsClient() { + EcsClient getEcsClient() { + return EcsClient.builder() + .region(getRegionObj(region)) + .credentialsProvider(getCredentialsProvider0()) + .build() + } - final builder = AmazonECSClientBuilder - .standard() - .withRegion(region) + @Memoized + CloudWatchLogsClient getLogsClient() { + return CloudWatchLogsClient.builder().region(getRegionObj(region)).credentialsProvider(getCredentialsProvider0()).build() + } - final credentials = getCredentialsProvider0() - if( credentials ) - builder.withCredentials(credentials) + S3Client getS3Client(S3SyncClientConfiguration s3ClientConfig, boolean global = false) { + final SdkHttpClient.Builder httpClientBuilder = s3ClientConfig.getHttpClientBuilder() + final ClientOverrideConfiguration overrideConfiguration = s3ClientConfig.getClientOverrideConfiguration() + final builder = S3Client.builder() + .crossRegionAccessEnabled(global) + .credentialsProvider(config.s3Config.anonymous ? 
AnonymousCredentialsProvider.create() : new S3CredentialsProvider(getCredentialsProvider0())) + .serviceConfiguration(S3Configuration.builder() + .pathStyleAccessEnabled(config.s3Config.pathStyleAccess) + .multiRegionEnabled(global) + .build()) - return builder.build() - } + if( config.s3Config.endpoint ) + builder.endpointOverride(URI.create(config.s3Config.endpoint)) - @Memoized - AWSLogs getLogsClient() { + // AWS SDK v2 region must be always set, even when endpoint is overridden + builder.region(getRegionObj(region)) - final builder = AWSLogsAsyncClientBuilder - .standard() - .withRegion(region) + if( httpClientBuilder != null ) + builder.httpClientBuilder(httpClientBuilder) - final credentials = getCredentialsProvider0() - if( credentials ) - builder.withCredentials(credentials) + if( overrideConfiguration != null ) + builder.overrideConfiguration(overrideConfiguration) return builder.build() } - AmazonS3 getS3Client(ClientConfiguration clientConfig=null, boolean global=false) { - final builder = AmazonS3ClientBuilder - .standard() - .withPathStyleAccessEnabled(config.s3Config.pathStyleAccess) - .withForceGlobalBucketAccessEnabled(global) - - final endpoint = config.s3Config.endpoint - if( endpoint ) - builder.withEndpointConfiguration(new EndpointConfiguration(endpoint, region)) + S3AsyncClient getS3AsyncClient(S3AsyncClientConfiguration s3ClientConfig, Long uploadChunkSize, boolean global = false) { + final httpClientBuilder = s3ClientConfig.getHttpClientBuilder() + final overrideConfiguration = s3ClientConfig.getClientOverrideConfiguration() + final builder = S3AsyncClient.builder() + .crossRegionAccessEnabled(global) + .credentialsProvider( + config.s3Config.anonymous + ? AnonymousCredentialsProvider.create() + : new S3CredentialsProvider(getCredentialsProvider0()) + ) + .serviceConfiguration( + S3Configuration.builder() + .pathStyleAccessEnabled(config.s3Config.pathStyleAccess) + .build() + ) + + if( uploadChunkSize > 0 ) + builder.multipartConfiguration(cfg -> cfg.minimumPartSizeInBytes(uploadChunkSize)) + + if( config.s3Config.endpoint ) + builder.endpointOverride(URI.create(config.s3Config.endpoint)) - else - builder.withRegion(region) + + // AWS SDK v2 region must be always set, even when endpoint is overridden + builder.region(getRegionObj(region)) - final credentials = config.s3Config.anonymous - ? 
new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()) - : new S3CredentialsProvider(getCredentialsProvider0()) - builder.withCredentials(credentials) + if( httpClientBuilder != null ) + builder.httpClientBuilder(httpClientBuilder) - if( clientConfig ) - builder.withClientConfiguration(clientConfig) + if( overrideConfiguration != null ) + builder.overrideConfiguration(overrideConfiguration) return builder.build() } - protected AWSCredentialsProvider getCredentialsProvider0() { + protected AwsCredentialsProvider getCredentialsProvider0() { if( accessKey && secretKey ) { - final creds = new BasicAWSCredentials(accessKey, secretKey) - return new AWSStaticCredentialsProvider(creds) + return StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKey, secretKey)) } if( profile ) { - return new AWSCredentialsProviderChain(List.of( - new ProfileCredentialsProvider(configFile(), profile), - new SsoCredentialsProviderV1(profile))) + return ProfileCredentialsProvider.builder() + .profileName(profile) + .build() } - return new AWSCredentialsProviderChain(List.of( - new EnvironmentVariableCredentialsProvider(), - new SystemPropertiesCredentialsProvider(), - WebIdentityTokenCredentialsProvider.create(), - new ProfileCredentialsProvider(configFile(), null), - new SsoCredentialsProviderV1(), - new EC2ContainerCredentialsProviderWrapper())) + return DefaultCredentialsProvider.create() } - static ProfilesConfigFile configFile() { - final creds = AwsProfileFileLocationProvider.DEFAULT_CREDENTIALS_LOCATION_PROVIDER.getLocation() - final config = AwsProfileFileLocationProvider.DEFAULT_CONFIG_LOCATION_PROVIDER.getLocation() - if( creds && config && SysEnv.get('NXF_DISABLE_AWS_CONFIG_MERGE')!='true' ) { - log.debug "Merging AWS credentials file '$creds' and config file '$config'" - final parser = new ConfigParser() - // add the credentials first because it has higher priority - parser.parseConfig(creds.text) - // add also the content of config file - parser.parseConfig(config.text) - final temp = File.createTempFile('aws','config') - // merge into a temporary file - temp.deleteOnExit() - temp.text = parser.text() - return new ProfilesConfigFile(temp.absolutePath) - } - if( creds ) { - log.debug "Using AWS credentials file '$creds'" - return new ProfilesConfigFile(creds) - } - if( config ) { - log.debug "Using AWS config file '$config'" - return new ProfilesConfigFile(config) - } - return null - } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy index 3b00d129d7..8d1e86a5fc 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy @@ -20,10 +20,10 @@ import java.nio.file.Path import java.util.concurrent.TimeUnit import java.util.concurrent.TimeoutException -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.model.AWSBatchException -import com.amazonaws.services.ecs.model.AccessDeniedException -import com.amazonaws.services.logs.model.ResourceNotFoundException +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.BatchException +import software.amazon.awssdk.services.ecs.model.AccessDeniedException +import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceNotFoundException import groovy.transform.CompileDynamic import groovy.transform.CompileStatic import 
groovy.transform.PackageScope @@ -177,7 +177,7 @@ class AwsBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExec } @PackageScope - AWSBatch getClient() { + BatchClient getClient() { client } @@ -238,7 +238,7 @@ class AwsBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExec final size = Runtime.runtime.availableProcessors() * 5 final opts = new ThrottlingExecutor.Options() - .retryOn { Throwable t -> t instanceof AWSBatchException && (t.errorCode=='TooManyRequestsException' || t.statusCode in RETRYABLE_STATUS) } + .retryOn { Throwable t -> t instanceof BatchException && (t.awsErrorDetails().errorCode() == 'TooManyRequestsException' || t.statusCode() in RETRYABLE_STATUS) } .onFailure { Throwable t -> session?.abort(t) } .onRateLimitChange { RateUnit rate -> logRateLimitChange(rate) } .withRateLimit(limit) diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchHelper.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchHelper.groovy index 0e5672ce57..36bb2895ec 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchHelper.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchHelper.groovy @@ -16,20 +16,20 @@ package nextflow.cloud.aws.batch -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.model.DescribeComputeEnvironmentsRequest -import com.amazonaws.services.batch.model.DescribeJobQueuesRequest -import com.amazonaws.services.batch.model.DescribeJobsRequest -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeInstancesRequest -import com.amazonaws.services.ec2.model.Instance -import com.amazonaws.services.ecs.AmazonECS -import com.amazonaws.services.ecs.model.DescribeContainerInstancesRequest -import com.amazonaws.services.ecs.model.DescribeTasksRequest -import com.amazonaws.services.ecs.model.InvalidParameterException -import com.amazonaws.services.logs.AWSLogs -import com.amazonaws.services.logs.model.GetLogEventsRequest -import com.amazonaws.services.logs.model.OutputLogEvent +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.DescribeComputeEnvironmentsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobQueuesRequest +import software.amazon.awssdk.services.batch.model.DescribeJobsRequest +import software.amazon.awssdk.services.ec2.Ec2Client +import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest +import software.amazon.awssdk.services.ec2.model.Instance +import software.amazon.awssdk.services.ecs.EcsClient +import software.amazon.awssdk.services.ecs.model.DescribeContainerInstancesRequest +import software.amazon.awssdk.services.ecs.model.DescribeTasksRequest +import software.amazon.awssdk.services.ecs.model.InvalidParameterException +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest +import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent import groovy.transform.CompileStatic import groovy.transform.Memoized import groovy.util.logging.Slf4j @@ -46,25 +46,25 @@ import nextflow.cloud.types.PriceModel class AwsBatchHelper { private AwsClientFactory factory - private AWSBatch batchClient + private BatchClient batchClient - AwsBatchHelper(AWSBatch batchClient, AwsClientFactory factory) { + AwsBatchHelper(BatchClient batchClient, AwsClientFactory factory) { this.batchClient = batchClient 
this.factory = factory } @Memoized - private AmazonECS getEcsClient() { + private EcsClient getEcsClient() { return factory.getEcsClient() } @Memoized - private AmazonEC2 getEc2Client() { + private Ec2Client getEc2Client() { return factory.getEc2Client() } @Memoized - private AWSLogs getLogsClient() { + private CloudWatchLogsClient getLogsClient() { return factory.getLogsClient() } @@ -75,20 +75,26 @@ class AwsBatchHelper { } private List getClusterArnByCompEnvNames(List envNames) { - final req = new DescribeComputeEnvironmentsRequest().withComputeEnvironments(envNames) + final req = DescribeComputeEnvironmentsRequest.builder() + .computeEnvironments(envNames) + .build() as DescribeComputeEnvironmentsRequest batchClient .describeComputeEnvironments(req) - .getComputeEnvironments() - *.getEcsClusterArn() + .computeEnvironments() + *.ecsClusterArn() } private List getComputeEnvByQueueName(String queueName) { - final req = new DescribeJobQueuesRequest().withJobQueues(queueName) + final req = DescribeJobQueuesRequest.builder() + .jobQueues(queueName) + .build() as DescribeJobQueuesRequest + final resp = batchClient.describeJobQueues(req) - final result = new ArrayList(10) - for (def queue : resp.getJobQueues()) { - for (def order : queue.getComputeEnvironmentOrder()) { - result.add(order.getComputeEnvironment()) + + final result = new ArrayList(10) + for (final queue : resp.jobQueues()) { + for (final order : queue.computeEnvironmentOrder()) { + result.add(order.computeEnvironment()) } } return result @@ -101,14 +107,15 @@ class AwsBatchHelper { } private String getContainerIdByClusterAndTaskArn(String clusterArn, String taskArn) { - final describeTaskReq = new DescribeTasksRequest() - .withCluster(clusterArn) - .withTasks(taskArn) + final describeTaskReq = DescribeTasksRequest.builder() + .cluster(clusterArn) + .tasks(taskArn) + .build() try { final describeTasksResult = ecsClient.describeTasks(describeTaskReq) final containers = - describeTasksResult.getTasks() - *.getContainerInstanceArn() + describeTasksResult.tasks() + *.containerInstanceArn() if( containers.size()==1 ) { return containers.get(0) } @@ -126,13 +133,14 @@ class AwsBatchHelper { } private String getInstanceIdByClusterAndContainerId(String clusterArn, String containerId) { - final describeContainerReq = new DescribeContainerInstancesRequest() - .withCluster(clusterArn) - .withContainerInstances(containerId) + final describeContainerReq = DescribeContainerInstancesRequest.builder() + .cluster(clusterArn) + .containerInstances(containerId) + .build() final instanceIds = ecsClient .describeContainerInstances(describeContainerReq) - .getContainerInstances() - *.getEc2InstanceId() + .containerInstances() + *.ec2InstanceId() if( !instanceIds ) { log.debug "Unable to find EC2 instance id for clusterArn=$clusterArn and containerId=$containerId" return null @@ -146,22 +154,24 @@ class AwsBatchHelper { @Memoized(maxCacheSize = 1_000) private CloudMachineInfo getInfoByInstanceId(String instanceId) { assert instanceId - final req = new DescribeInstancesRequest() .withInstanceIds(instanceId) - final res = ec2Client .describeInstances(req) .getReservations() [0] - final Instance instance = res ? res.getInstances() [0] : null + final req = DescribeInstancesRequest.builder() + .instanceIds(instanceId) + .build() + final res = ec2Client.describeInstances(req).reservations() [0] + final Instance instance = res ? 
res.instances() [0] : null if( !instance ) { log.debug "Unable to find cloud machine info for instanceId=$instanceId" return null } new CloudMachineInfo( - instance.getInstanceType(), - instance.getPlacement().getAvailabilityZone(), + instance.instanceType().toString(), + instance.placement().availabilityZone(), getPrice(instance)) } private PriceModel getPrice(Instance instance) { - instance.getInstanceLifecycle()=='spot' ? PriceModel.spot : PriceModel.standard + instance.instanceLifecycleAsString()=='spot' ? PriceModel.spot : PriceModel.standard } CloudMachineInfo getCloudInfoByQueueAndTaskArn(String queue, String taskArn) { @@ -177,11 +187,13 @@ } protected String getLogStreamId(String jobId) { - final request = new DescribeJobsRequest() .withJobs(jobId) + final request = DescribeJobsRequest.builder() + .jobs(jobId) + .build() final response = batchClient.describeJobs(request) - if( response.jobs ) { - final detail = response.jobs[0] - return detail.container.logStreamName + if( response.jobs() ) { + final detail = response.jobs()[0] + return detail.container().logStreamName() } else { log.debug "Unable to find info for batch job id=$jobId" @@ -205,14 +217,15 @@ return null } - final logRequest = new GetLogEventsRequest() - .withLogGroupName(groupName ?: "/aws/batch/job") - .withLogStreamName(streamId) + final logRequest = GetLogEventsRequest.builder() + .logGroupName(groupName ?: "/aws/batch/job") + .logStreamName(streamId) + .build() final result = new StringBuilder() - final resp = logsClient .getLogEvents(logRequest) - for( OutputLogEvent it : resp.events ) { - result.append(it.getMessage()).append('\n') + final resp = logsClient.getLogEvents(logRequest) + for( OutputLogEvent it : resp.events() ) { + result.append(it.message()).append('\n') } return result.toString() diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchProxy.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchProxy.groovy index 4bcc35d8b5..243aac605a 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchProxy.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchProxy.groovy @@ -16,7 +16,7 @@ package nextflow.cloud.aws.batch -import com.amazonaws.services.batch.AWSBatch +import software.amazon.awssdk.services.batch.BatchClient import nextflow.util.ClientProxyThrottler import nextflow.util.ThrottlingExecutor /** @@ -27,12 +27,12 @@ import nextflow.util.ThrottlingExecutor * * @author Paolo Di Tommaso */ -class AwsBatchProxy extends ClientProxyThrottler<AWSBatch> { +class AwsBatchProxy extends ClientProxyThrottler<BatchClient> { @Delegate(deprecated=true) - private AWSBatch target + private BatchClient target - AwsBatchProxy(AWSBatch client, ThrottlingExecutor executor) { + AwsBatchProxy(BatchClient client, ThrottlingExecutor executor) { super(client, executor, [describeJobs: 10 as Byte]) // note: use higher priority for `describeJobs` invocations this.target = client } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy index 3aff103736..161c7c958c 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy @@ -16,45 +16,47 @@ package nextflow.cloud.aws.batch import static nextflow.cloud.aws.batch.AwsContainerOptionsMapper.* import java.nio.file.Path 
import java.nio.file.Paths import java.time.Instant -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.model.AWSBatchException -import com.amazonaws.services.batch.model.ArrayProperties -import com.amazonaws.services.batch.model.AssignPublicIp -import com.amazonaws.services.batch.model.AttemptContainerDetail -import com.amazonaws.services.batch.model.ClientException -import com.amazonaws.services.batch.model.ContainerOverrides -import com.amazonaws.services.batch.model.ContainerProperties -import com.amazonaws.services.batch.model.DescribeJobDefinitionsRequest -import com.amazonaws.services.batch.model.DescribeJobDefinitionsResult -import com.amazonaws.services.batch.model.DescribeJobsRequest -import com.amazonaws.services.batch.model.DescribeJobsResult -import com.amazonaws.services.batch.model.EphemeralStorage -import com.amazonaws.services.batch.model.EvaluateOnExit -import com.amazonaws.services.batch.model.Host -import com.amazonaws.services.batch.model.JobDefinition -import com.amazonaws.services.batch.model.JobDefinitionType -import com.amazonaws.services.batch.model.JobDetail -import com.amazonaws.services.batch.model.JobTimeout -import com.amazonaws.services.batch.model.KeyValuePair -import com.amazonaws.services.batch.model.LogConfiguration -import com.amazonaws.services.batch.model.MountPoint -import com.amazonaws.services.batch.model.NetworkConfiguration -import com.amazonaws.services.batch.model.RegisterJobDefinitionRequest -import com.amazonaws.services.batch.model.RegisterJobDefinitionResult -import com.amazonaws.services.batch.model.ResourceRequirement -import com.amazonaws.services.batch.model.ResourceType -import com.amazonaws.services.batch.model.RetryStrategy -import com.amazonaws.services.batch.model.RuntimePlatform -import com.amazonaws.services.batch.model.SubmitJobRequest -import com.amazonaws.services.batch.model.SubmitJobResult -import com.amazonaws.services.batch.model.TerminateJobRequest -import com.amazonaws.services.batch.model.Volume +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.BatchException +import software.amazon.awssdk.services.batch.model.ArrayProperties +import software.amazon.awssdk.services.batch.model.AssignPublicIp +import software.amazon.awssdk.services.batch.model.AttemptContainerDetail +import software.amazon.awssdk.services.batch.model.ClientException +import software.amazon.awssdk.services.batch.model.ContainerOverrides +import software.amazon.awssdk.services.batch.model.ContainerProperties +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsResponse +import software.amazon.awssdk.services.batch.model.DescribeJobsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobsResponse +import software.amazon.awssdk.services.batch.model.EphemeralStorage +import software.amazon.awssdk.services.batch.model.EvaluateOnExit +import software.amazon.awssdk.services.batch.model.Host +import software.amazon.awssdk.services.batch.model.JobDefinition +import software.amazon.awssdk.services.batch.model.JobDefinitionType +import software.amazon.awssdk.services.batch.model.JobDetail +import software.amazon.awssdk.services.batch.model.JobStatus +import software.amazon.awssdk.services.batch.model.JobTimeout +import software.amazon.awssdk.services.batch.model.KeyValuePair +import software.amazon.awssdk.services.batch.model.LogConfiguration +import 
software.amazon.awssdk.services.batch.model.MountPoint +import software.amazon.awssdk.services.batch.model.NetworkConfiguration +import software.amazon.awssdk.services.batch.model.PlatformCapability +import software.amazon.awssdk.services.batch.model.RegisterJobDefinitionRequest +import software.amazon.awssdk.services.batch.model.RegisterJobDefinitionResponse +import software.amazon.awssdk.services.batch.model.ResourceRequirement +import software.amazon.awssdk.services.batch.model.ResourceType +import software.amazon.awssdk.services.batch.model.RetryStrategy +import software.amazon.awssdk.services.batch.model.RuntimePlatform +import software.amazon.awssdk.services.batch.model.SubmitJobRequest +import software.amazon.awssdk.services.batch.model.SubmitJobResponse +import software.amazon.awssdk.services.batch.model.TerminateJobRequest +import software.amazon.awssdk.services.batch.model.Volume import groovy.transform.Canonical import groovy.transform.CompileStatic import groovy.transform.Memoized @@ -103,7 +105,7 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler job=$jobId; work-dir=${task.getWorkDirStr()}" } @@ -417,7 +423,7 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler<String,JobDetail> { - if( e.statusCode != 404 || attempt++ > MAX_ATTEMPTS) + if( e.statusCode() != 404 || attempt++ > MAX_ATTEMPTS) throw e final delay = (Math.pow(DEFAULT_BACK_OFF_BASE, attempt) as long) * DEFAULT_BACK_OFF_DELAY @@ -482,9 +488,10 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler<String,JobDetail> { - protected void addVolumeMountsToContainer(Map<String,String> mountsMap, ContainerProperties container) { + protected void addVolumeMountsToContainer(Map<String,String> mountsMap, ContainerProperties.Builder container) { final mounts = new ArrayList<MountPoint>(mountsMap.size()) final volumes = new ArrayList<Volume>(mountsMap.size()) for( Map.Entry<String,String> entry : mountsMap.entrySet() ) { @@ -645,22 +653,21 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler3 ) throw new IllegalArgumentException("Not a valid volume mount syntax: $entry.value") - def mount = new MountPoint() - .withSourceVolume(mountName) - .withContainerPath(hostPath) - .withReadOnly(readOnly) + def mount = MountPoint.builder() + .sourceVolume(mountName) + .containerPath(hostPath) + .readOnly(readOnly).build() mounts << mount - def vol = new Volume() - .withName(mountName) - .withHost(new Host() - .withSourcePath(containerPath)) + def vol = Volume.builder() + .name(mountName) + .host(Host.builder().sourcePath(containerPath).build()).build() volumes << vol } if( mountsMap ) { - container.setMountPoints(mounts) - container.setVolumes(volumes) + container.mountPoints(mounts) + container.volumes(volumes) } } @@ -673,17 +680,19 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler<String,JobDetail> { - def job = jobs.find { JobDefinition it -> it.status == 'ACTIVE' && it.parameters?.'nf-token' == jobId } - return job ? "$name:$job.revision" : null + def job = jobs.find { JobDefinition it -> it.status() == 'ACTIVE' && it.parameters()?.'nf-token' == jobId } + return job ? 
"$name:${job.revision()}" : null } /** @@ -692,13 +701,15 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler0 ) { // retry the job when an Ec2 instance is terminate - final cond1 = new EvaluateOnExit().withAction('RETRY').withOnStatusReason('Host EC2*') + final cond1 = EvaluateOnExit.builder().action('RETRY').onStatusReason('Host EC2*').build() // the exit condition prevent to retry for other reason and delegate // instead to nextflow error strategy the handling of the error - final cond2 = new EvaluateOnExit().withAction('EXIT').withOnReason('*') - final retry = new RetryStrategy() - .withAttempts( attempts ) - .withEvaluateOnExit(cond1, cond2) - result.setRetryStrategy(retry) + final cond2 = EvaluateOnExit.builder().action('EXIT').onReason('*').build() + final retry = RetryStrategy.builder() + .attempts( attempts ) + .evaluateOnExit(cond1, cond2) + .build() + builder.retryStrategy(retry) } // set task timeout @@ -798,43 +810,43 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler(5) - final container = new ContainerOverrides() - container.command = getSubmitCommand() + final container = ContainerOverrides.builder() + container.command(getSubmitCommand()) // set the task memory final cpus = task.config.getCpus() final mem = task.config.getMemory() if( mem ) { final mega = opts.fargateMode ? normaliseFargateMem(cpus, mem) : mem.toMega() if( mega >= 4 ) - resources << new ResourceRequirement().withType(ResourceType.MEMORY).withValue(mega.toString()) + resources << ResourceRequirement.builder().type(ResourceType.MEMORY).value(mega.toString()).build() else log.warn "Ignoring task ${task.lazyName()} memory directive: ${task.config.getMemory()} -- AWS Batch job memory request cannot be lower than 4 MB" } // set the task cpus if( cpus > 1 ) - resources << new ResourceRequirement().withType(ResourceType.VCPU).withValue(task.config.getCpus().toString()) + resources << ResourceRequirement.builder().type(ResourceType.VCPU).value(task.config.getCpus().toString()).build() final accelerator = task.config.getAccelerator() if( accelerator ) { if( accelerator.type ) log.warn1 "Ignoring task ${task.lazyName()} accelerator type: ${accelerator.type} -- AWS Batch doesn't support accelerator type in job definition" - resources << new ResourceRequirement().withType(ResourceType.GPU).withValue(accelerator.request.toString()) + resources << ResourceRequirement.builder().type(ResourceType.GPU).value(accelerator.request.toString()).build() } if( resources ) - container.withResourceRequirements(resources) + container.resourceRequirements(resources) // set the environment def vars = getEnvironmentVars() if( vars ) - container.setEnvironment(vars) + container.environment(vars) - result.setContainerOverrides(container) + builder.containerOverrides(container.build()) // set the array properties if( task instanceof TaskArrayRun ) { @@ -843,10 +855,10 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler 10_000 ) throw new IllegalArgumentException("Job arrays on AWS Batch may not have more than 10,000 tasks") - result.setArrayProperties(new ArrayProperties().withSize(arraySize)) + builder.arrayProperties(ArrayProperties.builder().size(arraySize).build()) } - return result + return builder.build() } /** @@ -855,16 +867,16 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler getEnvironmentVars() { List vars = [] if( this.environment?.containsKey('NXF_DEBUG') ) - vars << new 
KeyValuePair().withName('NXF_DEBUG').withValue(this.environment['NXF_DEBUG']) + vars << KeyValuePair.builder().name('NXF_DEBUG').value(this.environment['NXF_DEBUG']).build() if( this.getAwsOptions().retryMode && this.getAwsOptions().retryMode in AwsOptions.VALID_RETRY_MODES) - vars << new KeyValuePair().withName('AWS_RETRY_MODE').withValue(this.getAwsOptions().retryMode) + vars << KeyValuePair.builder().name('AWS_RETRY_MODE').value(this.getAwsOptions().retryMode).build() if( this.getAwsOptions().maxTransferAttempts ) { - vars << new KeyValuePair().withName('AWS_MAX_ATTEMPTS').withValue(this.getAwsOptions().maxTransferAttempts as String) - vars << new KeyValuePair().withName('AWS_METADATA_SERVICE_NUM_ATTEMPTS').withValue(this.getAwsOptions().maxTransferAttempts as String) + vars << KeyValuePair.builder().name('AWS_MAX_ATTEMPTS').value(this.getAwsOptions().maxTransferAttempts as String).build() + vars << KeyValuePair.builder().name('AWS_METADATA_SERVICE_NUM_ATTEMPTS').value(this.getAwsOptions().maxTransferAttempts as String).build() } if( fusionEnabled() ) { for(Map.Entry it : fusionLauncher().fusionEnv()) { - vars << new KeyValuePair().withName(it.key).withValue(it.value) + vars << KeyValuePair.builder().name(it.key).value(it.value).build() } } return vars @@ -910,42 +922,42 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler<String,JobDetail> { - static private SubmitJobResult submit0(AWSBatch client, SubmitJobRequest req) { + static private SubmitJobResponse submit0(BatchClient client, SubmitJobRequest req) { try { return client.submitJob(req) } - catch (AWSBatchException e) { - if( e.statusCode>=500 ) + catch (BatchException e) { + if( e.awsErrorDetails().sdkHttpResponse().statusCode() >= 500 ) // raise a process exception so that nextflow can try to recover it - throw new ProcessSubmitException("Failed to submit job: ${req.jobName} - Reason: ${e.errorCode}", e) + throw new ProcessSubmitException("Failed to submit job: ${req.jobName()} - Reason: ${e.awsErrorDetails().errorCode()}", e) else // status code < 500 are not expected to be recoverable, just throw it again throw e } } - static private DescribeJobDefinitionsResult describeJobDefinitions0(AWSBatch client, DescribeJobDefinitionsRequest req) { + static private DescribeJobDefinitionsResponse describeJobDefinitions0(BatchClient client, DescribeJobDefinitionsRequest req) { try { client.describeJobDefinitions(req) } - catch (AWSBatchException e) { - if( e.statusCode>=500 ) + catch (BatchException e) { + if( e.awsErrorDetails().sdkHttpResponse().statusCode() >= 500 ) // raise a process exception so that nextflow can try to recover it - throw new ProcessSubmitException("Failed to describe job definitions: ${req.jobDefinitions} - Reason: ${e.errorCode}", e) + throw new ProcessSubmitException("Failed to describe job definitions: ${req.jobDefinitions()} - Reason: ${e.awsErrorDetails().errorCode()}", e) else // status code < 500 are not expected to be recoverable, just throw it again throw e } } - static private RegisterJobDefinitionResult createJobDef0(AWSBatch client, RegisterJobDefinitionRequest req) { + static private RegisterJobDefinitionResponse createJobDef0(BatchClient client, RegisterJobDefinitionRequest req) { try { return client.registerJobDefinition(req) } - catch (AWSBatchException e) { - if( e.statusCode>=500 ) + catch (BatchException e) { + if( e.awsErrorDetails().sdkHttpResponse().statusCode() >= 500 ) // raise a process exception so that nextflow can try to recover it - throw new ProcessSubmitException("Failed to register job definition: ${req.jobDefinitionName} - Reason: ${e.errorCode}", e) + throw new ProcessSubmitException("Failed to register job definition: ${req.jobDefinitionName()} - Reason: ${e.awsErrorDetails().errorCode()}", e) else // status code < 500 are not 
expected to be recoverable, just throw it again throw e diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsContainerOptionsMapper.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsContainerOptionsMapper.groovy index fc889d78fe..be0adfbbfb 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsContainerOptionsMapper.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsContainerOptionsMapper.groovy @@ -15,11 +15,11 @@ */ package nextflow.cloud.aws.batch -import com.amazonaws.services.batch.model.ContainerProperties -import com.amazonaws.services.batch.model.KeyValuePair -import com.amazonaws.services.batch.model.LinuxParameters -import com.amazonaws.services.batch.model.Tmpfs -import com.amazonaws.services.batch.model.Ulimit +import software.amazon.awssdk.services.batch.model.ContainerProperties +import software.amazon.awssdk.services.batch.model.KeyValuePair +import software.amazon.awssdk.services.batch.model.LinuxParameters +import software.amazon.awssdk.services.batch.model.Tmpfs +import software.amazon.awssdk.services.batch.model.Ulimit import groovy.transform.CompileStatic import nextflow.util.CmdLineOptionMap import nextflow.util.MemoryUnit @@ -37,78 +37,81 @@ class AwsContainerOptionsMapper { @Deprecated static ContainerProperties createContainerOpts(CmdLineOptionMap options) { - createContainerProperties(options) + return createContainerProperties(options) } static ContainerProperties createContainerProperties(CmdLineOptionMap options) { - final containerProperties = new ContainerProperties() + final builder = ContainerProperties.builder() + addCmdOptions(options, builder) + return builder.build() + } + + static void addCmdOptions(CmdLineOptionMap options, ContainerProperties.Builder builder){ if ( options?.hasOptions() ) { - checkPrivileged(options, containerProperties) - checkEnvVars(options, containerProperties) - checkUser(options, containerProperties) - checkReadOnly(options, containerProperties) - checkUlimit(options, containerProperties) + checkPrivileged(options, builder) + checkEnvVars(options, builder) + checkUser(options, builder) + checkReadOnly(options, builder) + checkUlimit(options, builder) LinuxParameters params = checkLinuxParameters(options) if ( params != null ) - containerProperties.setLinuxParameters(params) + builder.linuxParameters(params) } - return containerProperties } - protected static void checkPrivileged(CmdLineOptionMap options, ContainerProperties containerProperties) { + protected static void checkPrivileged(CmdLineOptionMap options, ContainerProperties.Builder containerProperties) { if ( findOptionWithBooleanValue(options, 'privileged') ) - containerProperties.setPrivileged(true); + containerProperties.privileged(true); } - protected static void checkEnvVars(CmdLineOptionMap options, ContainerProperties containerProperties) { + protected static void checkEnvVars(CmdLineOptionMap options, ContainerProperties.Builder containerProperties) { final keyValuePairs = new ArrayList() List values = findOptionWithMultipleValues(options, 'env') values.addAll(findOptionWithMultipleValues(options, 'e')) for( String it : values ) { final tokens = it.tokenize('=') - keyValuePairs << new KeyValuePair().withName(tokens[0]).withValue(tokens.size() == 2 ? tokens[1] : null) + keyValuePairs << KeyValuePair.builder().name(tokens[0]).value(tokens.size() == 2 ? 
tokens[1] : null).build() } if ( keyValuePairs ) - containerProperties.setEnvironment(keyValuePairs) + containerProperties.environment(keyValuePairs) } - protected static void checkUser(CmdLineOptionMap options, ContainerProperties containerProperties) { + protected static void checkUser(CmdLineOptionMap options, ContainerProperties.Builder containerProperties) { String user = findOptionWithSingleValue(options, 'u') if ( !user) user = findOptionWithSingleValue(options, 'user') if ( user ) - containerProperties.setUser(user) + containerProperties.user(user) } - protected static void checkReadOnly(CmdLineOptionMap options, ContainerProperties containerProperties) { + protected static void checkReadOnly(CmdLineOptionMap options, ContainerProperties.Builder containerProperties) { if ( findOptionWithBooleanValue(options, 'read-only') ) - containerProperties.setReadonlyRootFilesystem(true); + containerProperties.readonlyRootFilesystem(true); } - protected static void checkUlimit(CmdLineOptionMap options, ContainerProperties containerProperties) { + protected static void checkUlimit(CmdLineOptionMap options, ContainerProperties.Builder containerProperties) { final ulimits = new ArrayList<Ulimit>() findOptionWithMultipleValues(options, 'ulimit').each { value -> final tokens = value.tokenize('=') final limits = tokens[1].tokenize(':') if ( limits.size() > 1 ) - ulimits << new Ulimit().withName(tokens[0]) - .withSoftLimit(limits[0] as Integer).withHardLimit(limits[1] as Integer) + ulimits << Ulimit.builder().name(tokens[0]).softLimit(limits[0] as Integer).hardLimit(limits[1] as Integer).build() else - ulimits << new Ulimit().withName(tokens[0]).withSoftLimit(limits[0] as Integer) + ulimits << Ulimit.builder().name(tokens[0]).softLimit(limits[0] as Integer).build() } if ( ulimits.size() ) - containerProperties.setUlimits(ulimits) + containerProperties.ulimits(ulimits) } protected static LinuxParameters checkLinuxParameters(CmdLineOptionMap options) { - final params = new LinuxParameters() + final params = LinuxParameters.builder() boolean atLeastOneSet = false // shared Memory Size def value = findOptionWithSingleValue(options, 'shm-size') if ( value ) { final sharedMemorySize = MemoryUnit.of(value) - params.setSharedMemorySize(sharedMemorySize.mega as Integer) + params.sharedMemorySize(sharedMemorySize.mega as Integer) atLeastOneSet = true } @@ -117,39 +120,40 @@ class AwsContainerOptionsMapper { findOptionWithMultipleValues(options, 'tmpfs').each { ovalue -> def matcher = ovalue =~ /^(?<path>.*):(?<options>.*?),size=(?<sizeMiB>.*)$/ if (matcher.matches()) { - tmpfs << new Tmpfs().withContainerPath(matcher.group('path')) - .withSize(matcher.group('sizeMiB') as Integer) - .withMountOptions(matcher.group('options').tokenize(',')) + tmpfs << Tmpfs.builder().containerPath(matcher.group('path')) + .size(matcher.group('sizeMiB') as Integer) + .mountOptions(matcher.group('options').tokenize(',')) + .build() } else { throw new IllegalArgumentException("Found a malformed value '${ovalue}' for --tmpfs option") } } if ( tmpfs ) { - params.setTmpfs(tmpfs) + params.tmpfs(tmpfs) atLeastOneSet = true } // swap limit equal to memory plus swap value = findOptionWithSingleValue(options, 'memory-swap') if ( value ) { - params.setMaxSwap(value as Integer) + params.maxSwap(value as Integer) atLeastOneSet = true } // run an init inside the container if ( findOptionWithBooleanValue(options, 'init') ) { - params.setInitProcessEnabled(true) + params.initProcessEnabled(true) atLeastOneSet = true } // tune container memory swappiness value = 
findOptionWithSingleValue(options, 'memory-swappiness') if ( value ) { - params.setSwappiness(value as Integer) + params.swappiness(value as Integer) atLeastOneSet = true } - return atLeastOneSet ? params : null + return atLeastOneSet ? params.build() : null } /** diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsOptions.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsOptions.groovy index 1e20a2b7e1..86ea44db9d 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsOptions.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsOptions.groovy @@ -18,7 +18,7 @@ package nextflow.cloud.aws.batch import java.nio.file.Path -import com.amazonaws.services.s3.model.CannedAccessControlList +import software.amazon.awssdk.services.s3.model.ObjectCannedACL import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString @@ -124,7 +124,7 @@ class AwsOptions implements CloudTransferOptions { return awsConfig.s3Config.getStorageKmsKeyId() } - CannedAccessControlList getS3Acl() { + ObjectCannedACL getS3Acl() { return awsConfig.s3Config.getS3Acl() } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/BatchHelper.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/BatchHelper.groovy index 6672fb7e80..7ec5de489a 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/BatchHelper.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/BatchHelper.groovy @@ -16,16 +16,16 @@ package nextflow.cloud.aws.batch -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.model.DescribeComputeEnvironmentsRequest -import com.amazonaws.services.batch.model.DescribeJobQueuesRequest -import com.amazonaws.services.batch.model.DescribeJobsRequest -import com.amazonaws.services.ec2.AmazonEC2 -import com.amazonaws.services.ec2.model.DescribeInstanceAttributeRequest -import com.amazonaws.services.ec2.model.InstanceAttributeName -import com.amazonaws.services.ecs.AmazonECS -import com.amazonaws.services.ecs.model.DescribeContainerInstancesRequest -import com.amazonaws.services.ecs.model.DescribeTasksRequest +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.DescribeComputeEnvironmentsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobQueuesRequest +import software.amazon.awssdk.services.batch.model.DescribeJobsRequest +import software.amazon.awssdk.services.ec2.Ec2Client +import software.amazon.awssdk.services.ec2.model.DescribeInstanceAttributeRequest +import software.amazon.awssdk.services.ec2.model.InstanceAttributeName +import software.amazon.awssdk.services.ecs.EcsClient +import software.amazon.awssdk.services.ecs.model.DescribeContainerInstancesRequest +import software.amazon.awssdk.services.ecs.model.DescribeTasksRequest import groovy.transform.CompileStatic import groovy.transform.Memoized /** @@ -36,9 +36,9 @@ import groovy.transform.Memoized @CompileStatic class BatchHelper { - AWSBatch batchClient - AmazonECS ecsClient - AmazonEC2 ec2Client + BatchClient batchClient + EcsClient ecsClient + Ec2Client ec2Client @Memoized(maxCacheSize = 100) protected List getClusterArnByBatchQueue(String queueName) { @@ -47,20 +47,20 @@ class BatchHelper { } protected List getClusterArnByCompEnvNames(List envNames) { - final req = new DescribeComputeEnvironmentsRequest().withComputeEnvironments(envNames) + final req = 
DescribeComputeEnvironmentsRequest.builder().computeEnvironments(envNames).build() as DescribeComputeEnvironmentsRequest batchClient .describeComputeEnvironments(req) - .getComputeEnvironments() - *.getEcsClusterArn() + .computeEnvironments() + *.ecsClusterArn() } protected List getComputeEnvByQueueName(String queueName) { - final req = new DescribeJobQueuesRequest().withJobQueues(queueName) + final req = DescribeJobQueuesRequest.builder().jobQueues(queueName).build() as DescribeJobQueuesRequest final resp = batchClient.describeJobQueues(req) final result = new ArrayList(10) - for (def queue : resp.getJobQueues()) { - for (def order : queue.getComputeEnvironmentOrder()) { - result.add(order.getComputeEnvironment()) + for (def queue : resp.jobQueues()) { + for (def order : queue.computeEnvironmentOrder()) { + result.add(order.computeEnvironment()) } } return result @@ -78,13 +78,14 @@ } protected String getContainerIdByClusterAndTaskArn(String clusterArn, String taskArn) { - final describeTaskReq = new DescribeTasksRequest() - .withCluster(clusterArn) - .withTasks(taskArn) + final describeTaskReq = DescribeTasksRequest.builder() + .cluster(clusterArn) + .tasks(taskArn) + .build() final containers = ecsClient .describeTasks(describeTaskReq) - .getTasks() - *.getContainerInstanceArn() + .tasks() + *.containerInstanceArn() if( containers.size()==1 ) return containers.get(0) if( containers.size()==0 ) @@ -94,13 +95,14 @@ } protected String getInstanceIdByClusterAndContainerId(String clusterArn, String containerId) { - final describeContainerReq = new DescribeContainerInstancesRequest() - .withCluster(clusterArn) - .withContainerInstances(containerId) + final describeContainerReq = DescribeContainerInstancesRequest.builder() + .cluster(clusterArn) + .containerInstances(containerId) + .build() final instanceIds = ecsClient .describeContainerInstances(describeContainerReq) - .getContainerInstances() - *.getEc2InstanceId() + .containerInstances() + *.ec2InstanceId() if( !instanceIds ) return null if( instanceIds.size()==1 ) @@ -112,13 +114,13 @@ @Memoized(maxCacheSize = 100) protected String getInstanceTypeByInstanceId(String instanceId) { assert instanceId - final instanceAttributeReq = new DescribeInstanceAttributeRequest() - .withInstanceId(instanceId) - .withAttribute(InstanceAttributeName.InstanceType) + final instanceAttributeReq = DescribeInstanceAttributeRequest.builder() + .instanceId(instanceId) + .attribute(InstanceAttributeName.INSTANCE_TYPE) + .build() ec2Client .describeInstanceAttribute(instanceAttributeReq) - .getInstanceAttribute() - .getInstanceType() + .instanceType() + .value() } @@ -132,14 +134,14 @@ return null } - def describeJob(String jobId) { - def req = new DescribeJobsRequest().withJobs(jobId) + String describeJob(String jobId) { + final req = DescribeJobsRequest.builder().jobs(jobId).build() batchClient .describeJobs(req) - .getJobs() + .jobs() .get(0) - .getContainer() - .getContainerInstanceArn() + .container() + .containerInstanceArn() } String getInstanceTypeByQueueAndContainerArn(String queue, String containerArn) { diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy index d1ae070bda..869c577729 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy @@ -20,7 +20,7 @@ package 
nextflow.cloud.aws.config import java.nio.file.Path import java.nio.file.Paths -import com.amazonaws.regions.Regions +import software.amazon.awssdk.regions.Region import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Global @@ -83,7 +83,7 @@ class AwsConfig { String getS3GlobalRegion() { return !region || !s3Config.endpoint || s3Config.endpoint.contains(".amazonaws.com") - ? Regions.US_EAST_1.getName() // always use US_EAST_1 as global region for AWS endpoints + ? Region.US_EAST_1.id() // always use US_EAST_1 as global region for AWS endpoints : region // for custom endpoint use the config provided region } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy index 8e03ee4f81..658a533f60 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy @@ -19,7 +19,7 @@ package nextflow.cloud.aws.config import static nextflow.cloud.aws.util.AwsHelper.* -import com.amazonaws.services.s3.model.CannedAccessControlList +import software.amazon.awssdk.services.s3.model.ObjectCannedACL import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.SysEnv @@ -43,7 +43,7 @@ class AwsS3Config { private Boolean debug - private CannedAccessControlList s3Acl + private ObjectCannedACL s3Acl private Boolean pathStyleAccess @@ -106,7 +106,7 @@ class AwsS3Config { return debug } - CannedAccessControlList getS3Acl() { + ObjectCannedACL getS3Acl() { return s3Acl } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/mail/AwsMailProvider.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/mail/AwsMailProvider.groovy index cc18cd7ba9..38ab936514 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/mail/AwsMailProvider.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/mail/AwsMailProvider.groovy @@ -18,12 +18,10 @@ package nextflow.cloud.aws.mail import javax.mail.internet.MimeMessage -import java.nio.ByteBuffer - -import com.amazonaws.services.simpleemail.AmazonSimpleEmailService -import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClientBuilder -import com.amazonaws.services.simpleemail.model.RawMessage -import com.amazonaws.services.simpleemail.model.SendRawEmailRequest +import software.amazon.awssdk.core.SdkBytes +import software.amazon.awssdk.services.ses.SesClient +import software.amazon.awssdk.services.ses.model.RawMessage +import software.amazon.awssdk.services.ses.model.SendRawEmailRequest import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.mail.MailProvider @@ -57,15 +55,13 @@ class AwsMailProvider implements MailProvider { final outputStream = new ByteArrayOutputStream() message.writeTo(outputStream) // send the email - final rawMessage = new RawMessage(ByteBuffer.wrap(outputStream.toByteArray())) - final result = client.sendRawEmail(new SendRawEmailRequest(rawMessage)); + final rawMessage = RawMessage.builder().data(SdkBytes.fromByteArray(outputStream.toByteArray())).build() + final result = client.sendRawEmail(SendRawEmailRequest.builder().rawMessage(rawMessage).build()) log.debug "Mail message sent: ${result}" } - AmazonSimpleEmailService getEmailClient() { - return AmazonSimpleEmailServiceClientBuilder - .standard() - .build() + SesClient getEmailClient() { + return SesClient.builder().build() } } diff --git 
a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Client.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Client.java index 25aadbb2c8..9c7acbd1a8 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Client.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Client.java @@ -20,68 +20,25 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.nio.file.FileVisitOption; -import java.nio.file.FileVisitResult; -import java.nio.file.FileVisitor; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; -import java.util.EnumSet; import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import com.amazonaws.AmazonClientException; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.regions.Region; -import com.amazonaws.regions.RegionUtils; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.Headers; -import com.amazonaws.services.s3.model.AccessControlList; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.CopyObjectRequest; -import com.amazonaws.services.s3.model.CopyPartRequest; -import com.amazonaws.services.s3.model.CopyPartResult; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.GetObjectTaggingRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.ObjectTagging; -import com.amazonaws.services.s3.model.Owner; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.SSEAlgorithm; -import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.model.Tag; -import com.amazonaws.services.s3.transfer.Download; -import com.amazonaws.services.s3.transfer.MultipleFileUpload; -import com.amazonaws.services.s3.transfer.ObjectCannedAclProvider; -import com.amazonaws.services.s3.transfer.ObjectMetadataProvider; -import com.amazonaws.services.s3.transfer.ObjectTaggingProvider; -import com.amazonaws.services.s3.transfer.TransferManager; -import com.amazonaws.services.s3.transfer.TransferManagerBuilder; -import com.amazonaws.services.s3.transfer.Upload; -import com.amazonaws.services.s3.transfer.UploadContext; +import java.util.Properties; +import java.util.concurrent.*; +import java.util.function.Consumer; + +import nextflow.cloud.aws.nio.util.S3SyncClientConfiguration; +import software.amazon.awssdk.core.ResponseInputStream; +import 
software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.model.*; +import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.transfer.s3.model.*; +import nextflow.cloud.aws.AwsClientFactory; +import nextflow.cloud.aws.nio.util.S3AsyncClientConfiguration; import nextflow.cloud.aws.nio.util.S3MultipartOptions; import nextflow.cloud.aws.util.AwsHelper; -import nextflow.extension.FilesEx; -import nextflow.util.Duration; -import nextflow.util.ThreadPoolHelper; import nextflow.util.ThreadPoolManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,21 +47,21 @@ /** * Client Amazon S3 - * @see com.amazonaws.services.s3.AmazonS3Client + * @see software.amazon.awssdk.services.s3.S3Client */ public class S3Client { private static final Logger log = LoggerFactory.getLogger(S3Client.class); - private AmazonS3 client; + private software.amazon.awssdk.services.s3.S3Client client; - private CannedAccessControlList cannedAcl; + private ObjectCannedACL cannedAcl; private String kmsKeyId; - private SSEAlgorithm storageEncryption; + private ServerSideEncryption storageEncryption; - private TransferManager transferManager; + private S3TransferManager transferManager; private ExecutorService transferPool; @@ -112,133 +69,165 @@ public class S3Client { private Integer uploadMaxThreads = 10; - private Boolean isRequesterPaysEnabled = false; + private Boolean isRequesterPaysEnabled = false; - public S3Client(AmazonS3 client) { - this.client = client; + private String callerAccount; + + private AwsClientFactory factory; + + private Properties props; + + private boolean global; + + public S3Client(AwsClientFactory factory, Properties props, boolean global) { + S3SyncClientConfiguration clientConfig = S3SyncClientConfiguration.create(props); + this.factory = factory; + this.props = props; + this.global = global; + this.client = factory.getS3Client(clientConfig, global); + this.callerAccount = fetchCallerAccount(); } - public S3Client(ClientConfiguration config, AWSCredentials creds, String region) { - this.client = AmazonS3ClientBuilder - .standard() - .withCredentials(new AWSStaticCredentialsProvider(creds)) - .withClientConfiguration(config) - .withRegion(region) - .build(); + /** + * AmazonS3Client#getS3AccountOwner() is not available in SDK v2. + * The STSClient#getCallerIdentity returns the account, but it does not include the canonical ID required for ACLs. + * + * This function and fetchCallerAccount() emulate the old behavior; the canonical ID can only be + * retrieved if the user owns at least one bucket.
+ */ + public String getCallerAccount() { + return callerAccount; + } + + private String fetchCallerAccount(){ + try { + List buckets = client.listBuckets(ListBucketsRequest.builder().maxBuckets(1).build()).buckets(); + if (buckets == null || buckets.isEmpty()) + return null; + return getBucketAcl(buckets.getFirst().name()).owner().id(); + }catch (Throwable e){ + log.debug("Exception fetching caller account", e); + return null; + } } + /** - * @see com.amazonaws.services.s3.AmazonS3Client#listBuckets() + * @see software.amazon.awssdk.services.s3.S3Client#listBuckets() */ public List listBuckets() { - return client.listBuckets(); + return client.listBuckets().buckets(); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#listObjects(ListObjectsRequest) + * @see software.amazon.awssdk.services.s3.S3Client#listObjects(ListObjectsRequest) */ - public ObjectListing listObjects(ListObjectsRequest request) { + public ListObjectsResponse listObjects(ListObjectsRequest request) { return client.listObjects(request); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#getObject(String, String) + * @see software.amazon.awssdk.services.s3.S3Client#getObject */ - public S3Object getObject(String bucketName, String key) { - GetObjectRequest req = new GetObjectRequest(bucketName, key, isRequesterPaysEnabled); - return client.getObject(req); + public ResponseInputStream getObject(String bucketName, String key) { + GetObjectRequest.Builder reqBuilder = GetObjectRequest.builder().bucket(bucketName).key(key); + if( this.isRequesterPaysEnabled ) + reqBuilder.requestPayer(RequestPayer.REQUESTER); + return client.getObject(reqBuilder.build()); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#putObject(String, String, File) + * @see software.amazon.awssdk.services.s3.S3Client#putObject */ - public PutObjectResult putObject(String bucket, String key, File file) { - PutObjectRequest req = new PutObjectRequest(bucket, key, file); + public PutObjectResponse putObject(String bucket, String key, File file) { + PutObjectRequest.Builder builder = PutObjectRequest.builder().bucket(bucket).key(key); if( cannedAcl != null ) { log.trace("Setting canned ACL={}; bucket={}; key={}", cannedAcl, bucket, key); - req.withCannedAcl(cannedAcl); + builder.acl(cannedAcl); } - return client.putObject(req); + return client.putObject(builder.build(), file.toPath()); } - private PutObjectRequest preparePutObjectRequest(PutObjectRequest req, ObjectMetadata metadata, List tags, String contentType, String storageClass) { - req.withMetadata(metadata); + private PutObjectRequest preparePutObjectRequest(PutObjectRequest.Builder reqBuilder, List tags, String contentType, String storageClass) { if( cannedAcl != null ) { - req.withCannedAcl(cannedAcl); + reqBuilder.acl(cannedAcl); } if( tags != null && tags.size()>0 ) { - req.setTagging(new ObjectTagging(tags)); + reqBuilder.tagging(Tagging.builder().tagSet(tags).build()); } if( kmsKeyId != null ) { - req.withSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( storageEncryption!=null ) { - metadata.setSSEAlgorithm(storageEncryption.toString()); + reqBuilder.serverSideEncryption(storageEncryption); } if( contentType!=null ) { - metadata.setContentType(contentType); + reqBuilder.contentType(contentType); } if( storageClass!=null ) { - req.setStorageClass(storageClass); + reqBuilder.storageClass(storageClass); } - return req; + return reqBuilder.build(); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#putObject(String, 
String, java.io.InputStream, ObjectMetadata) + * @see software.amazon.awssdk.services.s3.S3Client#putObject */ - public PutObjectResult putObject(String bucket, String keyName, InputStream inputStream, ObjectMetadata metadata, List tags, String contentType) { - PutObjectRequest req = new PutObjectRequest(bucket, keyName, inputStream, metadata); + public PutObjectResponse putObject(String bucket, String keyName, InputStream inputStream, List tags, String contentType, long contentLength) { + PutObjectRequest.Builder reqBuilder = PutObjectRequest.builder() + .bucket(bucket) + .key(keyName); if( cannedAcl != null ) { - req.withCannedAcl(cannedAcl); + reqBuilder.acl(cannedAcl); } if( tags != null && tags.size()>0 ) { - req.setTagging(new ObjectTagging(tags)); + reqBuilder.tagging(Tagging.builder().tagSet(tags).build()); } if( kmsKeyId != null ) { - req.withSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( storageEncryption!=null ) { - metadata.setSSEAlgorithm(storageEncryption.toString()); + reqBuilder.serverSideEncryption(storageEncryption); } if( contentType!=null ) { - metadata.setContentType(contentType); + reqBuilder.contentType(contentType); } + PutObjectRequest req = reqBuilder.build(); if( log.isTraceEnabled() ) { log.trace("S3 PutObject request {}", req); } - return client.putObject(req); + return client.putObject(req, RequestBody.fromInputStream(inputStream, contentLength)); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#deleteObject(String, String) + * @see software.amazon.awssdk.services.s3.S3Client#deleteObject */ public void deleteObject(String bucket, String key) { - client.deleteObject(bucket, key); + client.deleteObject(DeleteObjectRequest.builder().bucket(bucket).key(key).build()); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#copyObject(CopyObjectRequest) + * @see software.amazon.awssdk.services.s3.S3Client#copyObject(CopyObjectRequest) */ - public void copyObject(CopyObjectRequest req, List tags, String contentType, String storageClass) { - if( tags !=null && tags.size()>0 ) { - req.setNewObjectTagging(new ObjectTagging(tags)); + public void copyObject(CopyObjectRequest.Builder reqBuilder, List tags, String contentType, String storageClass) { + if( tags !=null && !tags.isEmpty()) { + log.debug("Setting tags: {}", tags); + reqBuilder.taggingDirective(TaggingDirective.REPLACE); + reqBuilder.tagging(Tagging.builder().tagSet(tags).build()); } if( cannedAcl != null ) { - req.withCannedAccessControlList(cannedAcl); + reqBuilder.acl(cannedAcl); } - // getNewObjectMetadata returns null if no object metadata has been specified. - ObjectMetadata meta = req.getNewObjectMetadata() != null ? 
req.getNewObjectMetadata() : new ObjectMetadata(); if( storageEncryption != null ) { - meta.setSSEAlgorithm(storageEncryption.toString()); - req.setNewObjectMetadata(meta); + reqBuilder.serverSideEncryption(storageEncryption); } if( kmsKeyId !=null ) { - req.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKeyId)); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( contentType!=null ) { - meta.setContentType(contentType); - req.setNewObjectMetadata(meta); + reqBuilder.metadataDirective(MetadataDirective.REPLACE); + reqBuilder.contentType(contentType); } if( storageClass!=null ) { - req.setStorageClass(storageClass); + reqBuilder.storageClass(storageClass); } + CopyObjectRequest req = reqBuilder.build(); if( log.isTraceEnabled() ) { log.trace("S3 CopyObject request {}", req); } @@ -247,22 +236,11 @@ public void copyObject(CopyObjectRequest req, List tags, String contentType } /** - * @see com.amazonaws.services.s3.AmazonS3Client#getBucketAcl(String) + * @see software.amazon.awssdk.services.s3.S3Client#getBucketAcl */ - public AccessControlList getBucketAcl(String bucket) { - return client.getBucketAcl(bucket); - } - /** - * @see com.amazonaws.services.s3.AmazonS3Client#getS3AccountOwner() - */ - public Owner getS3AccountOwner() { - return client.getS3AccountOwner(); - } - /** - * @see com.amazonaws.services.s3.AmazonS3Client#setEndpoint(String) - */ - public void setEndpoint(String endpoint) { - client.setEndpoint(endpoint); + public AccessControlPolicy getBucketAcl(String bucket) { + GetBucketAclResponse response = client.getBucketAcl(GetBucketAclRequest.builder().bucket(bucket).build()); + return AccessControlPolicy.builder().grants(response.grants()).owner(response.owner()).build(); } public void setCannedAcl(String acl) { @@ -282,16 +260,16 @@ public void setKmsKeyId(String kmsKeyId) { public void setStorageEncryption(String alg) { if( alg == null ) return; - this.storageEncryption = SSEAlgorithm.fromString(alg); + this.storageEncryption = ServerSideEncryption.fromValue(alg); log.debug("Setting S3 SSE storage encryption algorithm={}", alg); } - public void setRequesterPaysEnabled(String requesterPaysEnabled) { - if( requesterPaysEnabled == null ) - return; - this.isRequesterPaysEnabled = Boolean.valueOf(requesterPaysEnabled); - log.debug("Setting S3 requester pays enabled={}", isRequesterPaysEnabled); - } + public void setRequesterPaysEnabled(String requesterPaysEnabled) { + if( requesterPaysEnabled == null ) + return; + this.isRequesterPaysEnabled = Boolean.valueOf(requesterPaysEnabled); + log.debug("Setting S3 requester pays enabled={}", isRequesterPaysEnabled); + } public void setUploadChunkSize(String value) { if( value==null ) @@ -319,46 +297,42 @@ public void setUploadMaxThreads(String value) { } } - public CannedAccessControlList getCannedAcl() { + public ObjectCannedACL getCannedAcl() { return cannedAcl; } - public AmazonS3 getClient() { + public software.amazon.awssdk.services.s3.S3Client getClient() { return client; } - public void setRegion(String regionName) { - Region region = RegionUtils.getRegion(regionName); - if( region == null ) - throw new IllegalArgumentException("Not a valid S3 region name: " + regionName); - client.setRegion(region); - } - - /** - * @see com.amazonaws.services.s3.AmazonS3Client#getObjectAcl(String, String) + * @see software.amazon.awssdk.services.s3.S3Client#getObjectAcl */ - public AccessControlList getObjectAcl(String bucketName, String key) { - return client.getObjectAcl(bucketName, key); + public AccessControlPolicy getObjectAcl(String 
bucketName, String key) { + GetObjectAclResponse response = client.getObjectAcl(GetObjectAclRequest.builder().bucket(bucketName).key(key).build()); + return AccessControlPolicy.builder().grants(response.grants()).owner(response.owner()).build(); } /** - * @see com.amazonaws.services.s3.AmazonS3Client#getObjectMetadata(String, String) + * @see software.amazon.awssdk.services.s3.S3Client#headObject */ - public ObjectMetadata getObjectMetadata(String bucketName, String key) { - return client.getObjectMetadata(bucketName, key); + public HeadObjectResponse getObjectMetadata(String bucketName, String key) { + return client.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build()); } public List getObjectTags(String bucketName, String key) { - return client.getObjectTagging(new GetObjectTaggingRequest(bucketName,key)).getTagSet(); + return client.getObjectTagging(GetObjectTaggingRequest.builder().bucket(bucketName).key(key).build()).tagSet(); } - /** - * @see com.amazonaws.services.s3.AmazonS3Client#listNextBatchOfObjects(com.amazonaws.services.s3.model.ObjectListing) - */ - public ObjectListing listNextBatchOfObjects(ObjectListing objectListing) { - return client.listNextBatchOfObjects(objectListing); - } + public String getObjectKmsKeyId(String bucketName, String key) { + return getObjectMetadata(bucketName, key).ssekmsKeyId(); + } + /** + * @see software.amazon.awssdk.services.s3.S3Client#listObjectsV2Paginator + */ + public ListObjectsV2Iterable listObjectsV2Paginator(ListObjectsV2Request request) { + return client.listObjectsV2Paginator(request); + } public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSize, S3MultipartOptions opts, List tags, String contentType, String storageClass ) { @@ -367,39 +341,39 @@ public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSiz final String sourceS3Path = "s3://"+sourceBucketName+'/'+sourceObjectKey; final String targetBucketName = s3Target.getBucket(); final String targetObjectKey = s3Target.getKey(); - final ObjectMetadata meta = new ObjectMetadata(); // Step 2: Initialize - InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(targetBucketName, targetObjectKey); + CreateMultipartUploadRequest.Builder reqBuilder = CreateMultipartUploadRequest.builder() + .bucket(targetBucketName) + .key(targetObjectKey); + if( cannedAcl!=null ) { - initiateRequest.withCannedACL(cannedAcl); + reqBuilder.acl(cannedAcl); } if( storageEncryption!=null ) { - meta.setSSEAlgorithm(storageEncryption.toString()); - initiateRequest.withObjectMetadata(meta); + reqBuilder.serverSideEncryption(storageEncryption); } if( kmsKeyId != null ) { - initiateRequest.setSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( tags != null && tags.size()>0 ) { - initiateRequest.setTagging( new ObjectTagging(tags)); + reqBuilder.tagging( Tagging.builder().tagSet(tags).build() ); } if( contentType!=null ) { - meta.setContentType(contentType); - initiateRequest.withObjectMetadata(meta); + reqBuilder.contentType(contentType); } if( storageClass!=null ) { - initiateRequest.setStorageClass(StorageClass.fromValue(storageClass)); + reqBuilder.storageClass(StorageClass.fromValue(storageClass)); } - InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(initiateRequest); + CreateMultipartUploadResponse initResult = client.createMultipartUpload(reqBuilder.build()); // Step 3: Save upload Id. 
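For reference, the steps that follow implement the standard SDK v2 multipart-copy sequence: createMultipartUpload, a series of uploadPartCopy calls, then completeMultipartUpload. A minimal self-contained sketch of that sequence, with placeholder bucket and key names and a fixed illustrative part size (not code from this patch, which parallelises the part copies with an executor; this version is sequential for brevity):

import java.util.ArrayList;
import java.util.List;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class MultipartCopySketch {
    // Copies srcBucket/srcKey to dstBucket/dstKey in fixed-size parts.
    static void copy(S3Client s3, String srcBucket, String srcKey,
                     String dstBucket, String dstKey, long objectSize) {
        // open the multipart upload on the target object
        String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
                .bucket(dstBucket).key(dstKey).build()).uploadId();
        // copy each byte range; v2 replaces the v1 firstByte/lastByte setters
        // with an HTTP-style "bytes=start-end" range string
        List<CompletedPart> parts = new ArrayList<>();
        long partSize = 64L * 1024 * 1024; // illustrative chunk size
        int partNumber = 1;
        for (long pos = 0; pos < objectSize; pos += partSize, partNumber++) {
            long last = Math.min(pos + partSize, objectSize) - 1;
            UploadPartCopyResponse resp = s3.uploadPartCopy(UploadPartCopyRequest.builder()
                    .sourceBucket(srcBucket).sourceKey(srcKey)
                    .destinationBucket(dstBucket).destinationKey(dstKey)
                    .uploadId(uploadId).partNumber(partNumber)
                    .copySourceRange("bytes=" + pos + "-" + last)
                    .build());
            // the part ETag now lives on the nested CopyPartResult
            parts.add(CompletedPart.builder()
                    .partNumber(partNumber)
                    .eTag(resp.copyPartResult().eTag())
                    .build());
        }
        // complete the upload with the accumulated part numbers and ETags
        s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket(dstBucket).key(dstKey).uploadId(uploadId)
                .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build())
                .build());
    }
}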
- String uploadId = initResult.getUploadId(); + String uploadId = initResult.uploadId(); // Multipart upload and copy allows max 10_000 parts // each part can be up to 5 GB @@ -408,7 +382,7 @@ public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSiz final int defChunkSize = opts.getChunkSize(); final long partSize = computePartSize(objectSize, defChunkSize); ExecutorService executor = S3OutputStream.getOrCreateExecutor(opts.getMaxThreads()); - List> copyPartRequests = new ArrayList<>(); + List> copyPartRequests = new ArrayList<>(); checkPartSize(partSize); // Step 4. create copy part requests @@ -421,15 +395,15 @@ public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSiz if( lastPosition >= objectSize ) lastPosition = objectSize - 1; - CopyPartRequest copyRequest = new CopyPartRequest() - .withDestinationBucketName(targetBucketName) - .withDestinationKey(targetObjectKey) - .withSourceBucketName(sourceBucketName) - .withSourceKey(sourceObjectKey) - .withUploadId(uploadId) - .withFirstByte(bytePosition) - .withLastByte(lastPosition) - .withPartNumber(i); + UploadPartCopyRequest copyRequest = UploadPartCopyRequest.builder() + .sourceBucket(sourceBucketName) + .sourceKey(sourceObjectKey) + .destinationBucket(targetBucketName) + .destinationKey(targetObjectKey) + .uploadId(uploadId) + .partNumber(i) + .copySourceRange("bytes=" + bytePosition + "-" + lastPosition) // e.g., "bytes=0-5242879" + .build(); copyPartRequests.add( copyPart(client, copyRequest, opts) ); bytePosition += partSize; @@ -437,59 +411,64 @@ public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSiz log.trace("Starting multipart copy from: {} to {} -- uploadId={}; objectSize={}; chunkSize={}; numOfChunks={}", s3Source, s3Target, uploadId, objectSize, partSize, copyPartRequests.size() ); - List etags = new ArrayList<>(); - List> responses; + + List completedParts = new ArrayList<>(); try { // Step 5. Start parallel parts copy - responses = executor.invokeAll(copyPartRequests); - + List> futures = executor.invokeAll(copyPartRequests); // Step 6. Fetch all results - for (Future response : responses) { - CopyPartResult result = response.get(); - etags.add(new PartETag(result.getPartNumber(), result.getETag())); + for (Future future : futures) { + completedParts.add(future.get()); } - } - catch( Exception e ) { + } catch( Exception e ) { throw new IllegalStateException("Multipart copy reported an unexpected error -- uploadId=" + uploadId, e); } // Step 7. 
Complete copy operation - CompleteMultipartUploadRequest completeRequest = new - CompleteMultipartUploadRequest( - targetBucketName, - targetObjectKey, - initResult.getUploadId(), - etags); + CompletedMultipartUpload completedUpload = CompletedMultipartUpload.builder() + .parts(completedParts) + .build(); + + CompleteMultipartUploadRequest completeRequest = CompleteMultipartUploadRequest.builder() + .bucket(targetBucketName) + .key(targetObjectKey) + .uploadId(uploadId) + .multipartUpload(completedUpload) + .build(); log.trace("Completing multipart copy uploadId={}", uploadId); client.completeMultipartUpload(completeRequest); } - static Callable copyPart( final AmazonS3 client, final CopyPartRequest request, final S3MultipartOptions opts ) { - return new Callable() { + static Callable copyPart( final software.amazon.awssdk.services.s3.S3Client client, final UploadPartCopyRequest request, final S3MultipartOptions opts ) { + return new Callable() { @Override - public CopyPartResult call() throws Exception { + public CompletedPart call() throws Exception { return copyPart0(client,request,opts); } }; } - static CopyPartResult copyPart0(AmazonS3 client, CopyPartRequest request, S3MultipartOptions opts) throws IOException, InterruptedException { + static CompletedPart copyPart0(software.amazon.awssdk.services.s3.S3Client client, UploadPartCopyRequest request, S3MultipartOptions opts) throws IOException, InterruptedException { - final String objectId = request.getUploadId(); - final int partNumber = request.getPartNumber(); - final long len = request.getLastByte() - request.getFirstByte(); + final String objectId = request.uploadId(); + final int partNumber = request.partNumber(); + final String range = request.copySourceRange(); int attempt=0; - CopyPartResult result=null; + CompletedPart result=null; while( result == null ) { attempt++; try { - log.trace("Copying multipart {} with length {} attempt {} for {} ", partNumber, len, attempt, objectId); - result = client.copyPart(request); + log.trace("Copying multipart {} with length {} attempt {} for {} ", partNumber, range, attempt, objectId); + UploadPartCopyResponse response = client.uploadPartCopy(request); + result = CompletedPart.builder() + .partNumber(partNumber) + .eTag(response.copyPartResult().eTag()) + .build(); } - catch (AmazonClientException e) { + catch (SdkException e) { if( attempt >= opts.getMaxAttempts() ) throw new IOException("Failed to upload multipart data to Amazon S3", e); @@ -503,161 +482,111 @@ static CopyPartResult copyPart0(AmazonS3 client, CopyPartRequest request, S3Mult // ===== transfer manager section ===== - synchronized TransferManager transferManager() { + synchronized S3TransferManager transferManager() { if( transferManager==null ) { log.debug("Creating S3 transfer manager pool - chunk-size={}; max-treads={};", uploadChunkSize, uploadMaxThreads); transferPool = ThreadPoolManager.create("S3TransferManager", uploadMaxThreads); - transferManager = TransferManagerBuilder.standard() - .withS3Client(getClient()) - .withMinimumUploadPartSize(uploadChunkSize) - .withExecutorFactory(() -> transferPool) + transferManager = S3TransferManager.builder() + .s3Client(factory.getS3AsyncClient(S3AsyncClientConfiguration.create(props), uploadChunkSize, global)) + .executor(transferPool) .build(); } return transferManager; } - public void downloadFile(S3Path source, File target) { - Download download = transferManager() - .download(source.getBucket(), source.getKey(), target); - try { - download.waitForCompletion(); - } - catch 
(InterruptedException e) { - log.debug("S3 download file: s3://{}/{} interrupted",source.getBucket(), source.getKey()); + public void downloadFile(S3Path source, File target) throws IOException { + DownloadFileRequest downloadFileRequest = DownloadFileRequest.builder() + .getObjectRequest(b -> b.bucket(source.getBucket()).key(source.getKey())) + .destination(target) + .build(); + + FileDownload downloadFile = transferManager().downloadFile(downloadFileRequest); + try{ + downloadFile.completionFuture().get(); + } catch (InterruptedException e){ + log.debug("S3 download file: s3://{}/{} cancelled", source.getBucket(), source.getKey()); Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + log.debug("S3 download file: s3://{}/{} exception thrown", source.getBucket(), source.getKey()); + throw new IOException(e.getCause()); } - catch (AmazonS3Exception e) { - throw e; - } + } public void downloadDirectory(S3Path source, File targetFile) throws IOException { - // - // the download directory method provided by the TransferManager replicates - // the source files directory structure in the target path - // see https://github.com/aws/aws-sdk-java/issues/1321 - // - // just traverse to source path a copy all files - // - final Path target = targetFile.toPath(); - final List allDownloads = new ArrayList<>(); - - FileVisitor visitor = new SimpleFileVisitor() { - - public FileVisitResult preVisitDirectory(Path current, BasicFileAttributes attr) throws IOException { - // get the *delta* path against the source path - Path rel = source.relativize(current); - String delta = rel != null ? rel.toString() : null; - Path newFolder = delta != null ? target.resolve(delta) : target; - if(log.isTraceEnabled()) - log.trace("Copy DIR: " + current + " -> " + newFolder); - // this `copy` creates the new folder, but does not copy the contained files - Files.createDirectory(newFolder); - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFile(Path current, BasicFileAttributes attr) { - // get the *delta* path against the source path - Path rel = source.relativize(current); - String delta = rel != null ? rel.toString() : null; - Path newFile = delta != null ? 
target.resolve(delta) : target; - if( log.isTraceEnabled()) - log.trace("Copy file: " + current + " -> "+ FilesEx.toUriString(newFile)); - - String sourceKey = ((S3Path) current).getKey(); - Download it = transferManager() .download(source.getBucket(), sourceKey, newFile.toFile()); - allDownloads.add(it); - - return FileVisitResult.CONTINUE; - } - - }; - - Files.walkFileTree(source, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, visitor); + DownloadDirectoryRequest downloadDirRequest = DownloadDirectoryRequest.builder() + .bucket(source.getBucket()) + .listObjectsV2RequestTransformer(builder -> builder.prefix(source.getKey())) + .destination(targetFile.toPath()) + .build(); - try { - while(allDownloads.size()>0) { - allDownloads.get(0).waitForCompletion(); - allDownloads.remove(0); - } - } - catch (InterruptedException e) { - log.debug("S3 download directory: s3://{}/{} interrupted", source.getBucket(), source.getKey()); + DirectoryDownload downloadDirectory = transferManager().downloadDirectory(downloadDirRequest); + try{ + downloadDirectory.completionFuture().get(); + } catch (InterruptedException e){ + log.debug("S3 download directory: s3://{}/{} cancelled", source.getBucket(), source.getKey()); Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + log.debug("S3 download directory: s3://{}/{} exception thrown", source.getBucket(), source.getKey()); + throw new IOException(e.getCause()); } } - public void uploadFile(File source, S3Path target) { - PutObjectRequest req = new PutObjectRequest(target.getBucket(), target.getKey(), source); - ObjectMetadata metadata = new ObjectMetadata(); - preparePutObjectRequest(req, metadata, target.getTagsList(), target.getContentType(), target.getStorageClass()); + public void uploadFile(File source, S3Path target) throws IOException{ + PutObjectRequest.Builder req = PutObjectRequest.builder().bucket(target.getBucket()).key(target.getKey()); + preparePutObjectRequest(req, target.getTagsList(), target.getContentType(), target.getStorageClass()); // initiate transfer - Upload upload = transferManager() .upload(req); - // await for completion - try { - upload.waitForCompletion(); - } - catch (InterruptedException e) { - log.debug("S3 upload file: s3://{}/{} interrupted", target.getBucket(), target.getKey()); + Upload upload = transferManager().upload(UploadRequest.builder().putObjectRequest(req.build()).requestBody(AsyncRequestBody.fromFile(source)).build()); + try{ + upload.completionFuture().get(); + } catch (InterruptedException e){ + log.debug("S3 upload file: s3://{}/{} cancelled", target.getBucket(), target.getKey()); Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + log.debug("S3 upload file: s3://{}/{} exception thrown", target.getBucket(), target.getKey()); + throw new IOException(e.getCause()); } } - /** - * This class is used by the upload directory operation to acquire the mecessary meta info - */ - private class MetadataProvider implements ObjectMetadataProvider, ObjectTaggingProvider, ObjectCannedAclProvider { - - @Override - public CannedAccessControlList provideObjectCannedAcl(File file) { - return cannedAcl; - } + private Consumer transformUploadRequest(List tags) { + return builder -> builder.putObjectRequest(updateBuilder(builder.build().putObjectRequest().toBuilder(), tags).build()); + } - @Override - public void provideObjectMetadata(File file, ObjectMetadata metadata) { - if( storageEncryption!=null ) { - metadata.setSSEAlgorithm(storageEncryption.toString()); - } - if( 
kmsKeyId!=null ) { - // metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION, SSEAlgorithm.KMS.getAlgorithm()); - metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, kmsKeyId); - } - } + private PutObjectRequest.Builder updateBuilder(PutObjectRequest.Builder porBuilder, List tags) { - @Override - public ObjectTagging provideObjectTags(UploadContext context) { - List tags = uploadTags.get(); - if( tags==null || tags.size()==0 ) - return null; - return new ObjectTagging(new ArrayList<>(tags)); - } + if( cannedAcl != null ) + porBuilder.acl(cannedAcl); + if( storageEncryption != null ) + porBuilder.serverSideEncryption(storageEncryption); + if( kmsKeyId != null ) + porBuilder.ssekmsKeyId(kmsKeyId); + if( tags != null && !tags.isEmpty() ) + porBuilder.tagging(Tagging.builder().tagSet(tags).build()); + return porBuilder; } - final private MetadataProvider metaProvider = new MetadataProvider(); - - final private ThreadLocal> uploadTags = new ThreadLocal<>(); + public void uploadDirectory(File source, S3Path target) throws IOException { + UploadDirectoryRequest request = UploadDirectoryRequest.builder() + .bucket(target.getBucket()) + .s3Prefix(target.getKey()) + .source(source.toPath()) + .uploadFileRequestTransformer(transformUploadRequest(target.getTagsList())) + .build(); - public void uploadDirectory(File source, S3Path target) { - // set the tags to be used in a thread local - uploadTags.set( target.getTagsList() ); // initiate transfer - MultipleFileUpload upload = transferManager() - .uploadDirectory(target.getBucket(), target.getKey(), source, true, metaProvider, metaProvider, metaProvider); - // the tags are fetched by the previous operation - // the thread local can be cleared - uploadTags.remove(); - // await for completion + DirectoryUpload upload = transferManager().uploadDirectory(request); try { - upload.waitForCompletion(); - } - catch (InterruptedException e) { - log.debug("S3 upload file: s3://{}/{} interrupted", target.getBucket(), target.getKey()); + CompletedDirectoryUpload completed = upload.completionFuture().get(); + if (!completed.failedTransfers().isEmpty()){ + log.debug("S3 upload directory: s3://{}/{} failed transfers", target.getBucket(), target.getKey()); + throw new IOException("Some transfers in S3 upload directory: s3://"+ target.getBucket() +"/"+ target.getKey() +" have failed - Transfers: " + completed.failedTransfers() ); + } + } catch (InterruptedException e){ + log.debug("S3 upload directory: s3://{}/{} cancelled", target.getBucket(), target.getKey()); Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + log.debug("S3 upload directory: s3://{}/{} exception thrown", target.getBucket(), target.getKey()); + throw new IOException(e.getCause()); } } - - String getObjectKmsKeyId(String bucketName, String key) { - return getObjectMetadata(bucketName,key).getSSEAwsKmsKeyId(); - } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystem.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystem.java index a749766135..3c76aa87cd 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystem.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystem.java @@ -29,7 +29,7 @@ import java.util.Properties; import java.util.Set; -import com.amazonaws.services.s3.model.Bucket; +import software.amazon.awssdk.services.s3.model.Bucket; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; @@ -84,7 +84,7 @@ public Iterable getRootDirectories() { 
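The v2 model classes also drop the get prefix from accessors, which is why bucket.getName() becomes bucket.name() in the loop below. A minimal sketch of the v2 bucket listing that backs it (client construction elided; the class and method names are illustrative):

import java.util.List;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Bucket;

class ListBucketsSketch {
    // In v2, listBuckets() returns a ListBucketsResponse rather than a List of Bucket,
    // hence the extra .buckets() call in the S3Client wrapper shown earlier in this patch.
    static List<String> bucketNames(S3Client s3) {
        return s3.listBuckets().buckets().stream()
                .map(Bucket::name)
                .toList();
    }
}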
ImmutableList.Builder builder = ImmutableList.builder(); for (Bucket bucket : client.listBuckets()) { - builder.add(new S3Path(this, bucket.getName())); + builder.add(new S3Path(this, bucket.name())); } return builder.build(); diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystemProvider.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystemProvider.java index 132782e79d..8b21ff3e79 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystemProvider.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3FileSystemProvider.java @@ -60,19 +60,9 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import com.amazonaws.regions.Regions; -import com.amazonaws.services.s3.model.AccessControlList; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.CopyObjectRequest; -import com.amazonaws.services.s3.model.Grant; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.Owner; -import com.amazonaws.services.s3.model.Permission; -import com.amazonaws.services.s3.model.S3ObjectId; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.s3.model.Tag; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.services.s3.model.*; +import software.amazon.awssdk.services.s3.model.S3Object; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; @@ -81,6 +71,7 @@ import nextflow.cloud.aws.config.AwsConfig; import nextflow.cloud.aws.nio.util.IOUtils; import nextflow.cloud.aws.nio.util.S3MultipartOptions; +import nextflow.cloud.aws.nio.util.S3ObjectId; import nextflow.cloud.aws.nio.util.S3ObjectSummaryLookup; import nextflow.extension.FilesEx; import nextflow.file.CopyOptions; @@ -88,6 +79,7 @@ import nextflow.file.FileSystemTransferAware; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import static com.google.common.collect.Sets.difference; import static java.lang.String.format; @@ -209,14 +201,12 @@ public InputStream newInputStream(Path path, OpenOption... 
options) result = s3Path .getFileSystem() .getClient() - .getObject(s3Path.getBucket(), s3Path.getKey()) - .getObjectContent(); + .getObject(s3Path.getBucket(), s3Path.getKey()); if (result == null) throw new IOException(String.format("The specified path is a directory: %s", FilesEx.toUriString(s3Path))); - } - catch (AmazonS3Exception e) { - if (e.getStatusCode() == 404) + }catch (AwsServiceException e) { + if (e.statusCode() == 404) throw new NoSuchFileException(path.toString()); // otherwise throws a generic IO exception throw new IOException(String.format("Cannot access file: %s", FilesEx.toUriString(s3Path)),e); @@ -368,16 +358,15 @@ public SeekableByteChannel newByteChannel(Path path, try { InputStream is = s3Path.getFileSystem().getClient() - .getObject(s3Path.getBucket(), s3Path.getKey()) - .getObjectContent(); + .getObject(s3Path.getBucket(), s3Path.getKey()); if (is == null) throw new IOException(String.format("The specified path is a directory: %s", path)); Files.write(tempFile, IOUtils.toByteArray(is)); } - catch (AmazonS3Exception e) { - if (e.getStatusCode() != 404) + catch (S3Exception e) { + if (e.awsErrorDetails().sdkHttpResponse().statusCode() != 404) throw new IOException(String.format("Cannot access file: %s", path),e); } @@ -401,11 +390,6 @@ public void close() throws IOException { seekable.close(); // upload the content where the seekable ends (close) if (Files.exists(tempFile)) { - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(Files.size(tempFile)); - // FIXME: #20 ServiceLoader can't load com.upplication.s3fs.util.FileTypeDetector when this library is used inside a ear :( - metadata.setContentType(Files.probeContentType(tempFile)); - try (InputStream stream = Files.newInputStream(tempFile)) { /* FIXME: if the stream is {@link InputStream#markSupported()} i can reuse the same stream @@ -414,7 +398,7 @@ public void close() throws IOException { */ s3Path.getFileSystem() .getClient() - .putObject(s3Path.getBucket(), s3Path.getKey(), stream, metadata, tags, contentType); + .putObject(s3Path.getBucket(), s3Path.getKey(), stream, tags, contentType, Files.size(tempFile)); } } else { @@ -477,15 +461,13 @@ public void createDirectory(Path dir, FileAttribute... attrs) "attrs not yet supported: %s", ImmutableList.copyOf(attrs)); // TODO List tags = s3Path.getTagsList(); - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(0); String keyName = s3Path.getKey() + (s3Path.getKey().endsWith("/") ? "" : "/"); s3Path.getFileSystem() .getClient() - .putObject(s3Path.getBucket(), keyName, new ByteArrayInputStream(new byte[0]), metadata, tags, null); + .putObject(s3Path.getBucket(), keyName, new ByteArrayInputStream(new byte[0]), tags, null, 0); } @Override @@ -548,18 +530,22 @@ public void copy(Path source, Path target, CopyOption... options) S3Client client = s3Source.getFileSystem() .getClient(); Properties props = s3Target.getFileSystem().properties(); - final ObjectMetadata sourceObjMetadata = s3Source.getFileSystem().getClient().getObjectMetadata(s3Source.getBucket(), s3Source.getKey()); + final HeadObjectResponse sourceObjMetadata = s3Source.getFileSystem().getClient().getObjectMetadata(s3Source.getBucket(), s3Source.getKey()); final S3MultipartOptions opts = props != null ? 
new S3MultipartOptions(props) : new S3MultipartOptions(); final long maxSize = opts.getMaxCopySize(); - final long length = sourceObjMetadata.getContentLength(); + final long length = sourceObjMetadata.contentLength(); final List tags = ((S3Path) target).getTagsList(); final String contentType = ((S3Path) target).getContentType(); final String storageClass = ((S3Path) target).getStorageClass(); if( length <= maxSize ) { - CopyObjectRequest copyObjRequest = new CopyObjectRequest(s3Source.getBucket(), s3Source.getKey(),s3Target.getBucket(), s3Target.getKey()); + CopyObjectRequest.Builder reqBuilder = CopyObjectRequest.builder() + .sourceBucket(s3Source.getBucket()) + .sourceKey(s3Source.getKey()) + .destinationBucket(s3Target.getBucket()) + .destinationKey(s3Target.getKey()); log.trace("Copy file via copy object - source: source={}, target={}, tags={}, storageClass={}", s3Source, s3Target, tags, storageClass); - client.copyObject(copyObjRequest, tags, contentType, storageClass); + client.copyObject(reqBuilder, tags, contentType, storageClass); } else { log.trace("Copy file via multipart upload - source: source={}, target={}, tags={}, storageClass={}", s3Source, s3Target, tags, storageClass); @@ -609,23 +595,30 @@ public void checkAccess(Path path, AccessMode... modes) throws IOException { } // get ACL and check if the file exists as a side-effect - AccessControlList acl = getAccessControl(s3Path); - + AccessControlPolicy acl = getAccessControl(s3Path); + String caller = client.getCallerAccount(); for (AccessMode accessMode : modes) { switch (accessMode) { case EXECUTE: throw new AccessDeniedException(s3Path.toString(), null, "file is not executable"); case READ: - if (!hasPermissions(acl, client.getS3AccountOwner(), - EnumSet.of(Permission.FullControl, Permission.Read))) { + if (caller == null) { + //if we cannot get the user's canonical ID, try read the object; + s3ObjectSummaryLookup.lookup((S3Path) path); + } + else if (!hasPermissions(acl, caller, + EnumSet.of(Permission.FULL_CONTROL, Permission.READ))) { throw new AccessDeniedException(s3Path.toString(), null, "file is not readable"); } break; case WRITE: - if (!hasPermissions(acl, client.getS3AccountOwner(), - EnumSet.of(Permission.FullControl, Permission.Write))) { + if (caller == null) { + log.warn("User's Canonical Id cannot be retrieved. We can not check the access."); + } + else if (!hasPermissions(acl, caller, + EnumSet.of(Permission.FULL_CONTROL, Permission.WRITE))) { throw new AccessDeniedException(s3Path.toString(), null, format("bucket '%s' is not writable", s3Path.getBucket())); @@ -643,12 +636,12 @@ public void checkAccess(Path path, AccessMode... 
modes) throws IOException { * @param permissions almost one * @return */ - private boolean hasPermissions(AccessControlList acl, Owner owner, + private boolean hasPermissions(AccessControlPolicy acl, String owner, EnumSet permissions) { boolean result = false; - for (Grant grant : acl.getGrants()) { - if (grant.getGrantee().getIdentifier().equals(owner.getId()) - && permissions.contains(grant.getPermission())) { + for (Grant grant : acl.grants()) { + if (grant.grantee().id().equals(owner) + && permissions.contains(grant.permission())) { result = true; break; } @@ -700,24 +693,24 @@ private Optional readAttr1(S3Path s3Path) throws IOException { } private S3FileAttributes readAttr0(S3Path s3Path) throws IOException { - S3ObjectSummary objectSummary = s3ObjectSummaryLookup.lookup(s3Path); + S3Object objectSummary = s3ObjectSummaryLookup.lookup(s3Path); // parse the data to BasicFileAttributes. FileTime lastModifiedTime = null; - if( objectSummary.getLastModified() != null ) { - lastModifiedTime = FileTime.from(objectSummary.getLastModified().getTime(), TimeUnit.MILLISECONDS); + if( objectSummary.lastModified() != null ) { + lastModifiedTime = FileTime.from(objectSummary.lastModified().toEpochMilli(), TimeUnit.MILLISECONDS); } - long size = objectSummary.getSize(); + long size = objectSummary.size(); boolean directory = false; boolean regularFile = false; - String key = objectSummary.getKey(); + String key = objectSummary.key(); // check if is a directory and the key of this directory exists in amazon s3 - if (objectSummary.getKey().equals(s3Path.getKey() + "/") && objectSummary.getKey().endsWith("/")) { + if (objectSummary.key().equals(s3Path.getKey() + "/") && objectSummary.key().endsWith("/")) { directory = true; } // is a directory but does not exist in amazon s3 - else if ((!objectSummary.getKey().equals(s3Path.getKey()) || "".equals(s3Path.getKey())) && objectSummary.getKey().startsWith(s3Path.getKey())){ + else if ((!objectSummary.key().equals(s3Path.getKey()) || "".equals(s3Path.getKey())) && objectSummary.key().startsWith(s3Path.getKey())){ directory = true; // no metadata, we fake one size = 0; @@ -743,113 +736,27 @@ public void setAttribute(Path path, String attribute, Object value, throw new UnsupportedOperationException(); } - protected ClientConfiguration createClientConfig(Properties props) { - ClientConfiguration config = new ClientConfiguration(); - - if( props == null ) - return config; - - if( props.containsKey("connection_timeout") ) { - log.trace("AWS client config - connection_timeout: {}", props.getProperty("connection_timeout")); - config.setConnectionTimeout(Integer.parseInt(props.getProperty("connection_timeout"))); - } - - if( props.containsKey("max_connections")) { - log.trace("AWS client config - max_connections: {}", props.getProperty("max_connections")); - config.setMaxConnections(Integer.parseInt(props.getProperty("max_connections"))); - } - - if( props.containsKey("max_error_retry")) { - log.trace("AWS client config - max_error_retry: {}", props.getProperty("max_error_retry")); - config.setMaxErrorRetry(Integer.parseInt(props.getProperty("max_error_retry"))); - } - - if( props.containsKey("protocol")) { - log.trace("AWS client config - protocol: {}", props.getProperty("protocol")); - config.setProtocol(Protocol.valueOf(props.getProperty("protocol").toUpperCase())); - } - - if( props.containsKey("proxy_domain")) { - log.trace("AWS client config - proxy_domain: {}", props.getProperty("proxy_domain")); - config.setProxyDomain(props.getProperty("proxy_domain")); - 
} - - if( props.containsKey("proxy_host")) { - log.trace("AWS client config - proxy_host: {}", props.getProperty("proxy_host")); - config.setProxyHost(props.getProperty("proxy_host")); - } - - if( props.containsKey("proxy_port")) { - log.trace("AWS client config - proxy_port: {}", props.getProperty("proxy_port")); - config.setProxyPort(Integer.parseInt(props.getProperty("proxy_port"))); - } - - if( props.containsKey("proxy_username")) { - log.trace("AWS client config - proxy_username: {}", props.getProperty("proxy_username")); - config.setProxyUsername(props.getProperty("proxy_username")); - } - - if( props.containsKey("proxy_password")) { - log.trace("AWS client config - proxy_password: {}", props.getProperty("proxy_password")); - config.setProxyPassword(props.getProperty("proxy_password")); - } - - if ( props.containsKey("proxy_workstation")) { - log.trace("AWS client config - proxy_workstation: {}", props.getProperty("proxy_workstation")); - config.setProxyWorkstation(props.getProperty("proxy_workstation")); - } - - if ( props.containsKey("signer_override")) { - log.debug("AWS client config - signerOverride: {}", props.getProperty("signer_override")); - config.setSignerOverride(props.getProperty("signer_override")); - } - - if( props.containsKey("socket_send_buffer_size_hints") || props.containsKey("socket_recv_buffer_size_hints") ) { - log.trace("AWS client config - socket_send_buffer_size_hints: {}, socket_recv_buffer_size_hints: {}", props.getProperty("socket_send_buffer_size_hints","0"), props.getProperty("socket_recv_buffer_size_hints", "0")); - int send = Integer.parseInt(props.getProperty("socket_send_buffer_size_hints","0")); - int recv = Integer.parseInt(props.getProperty("socket_recv_buffer_size_hints", "0")); - config.setSocketBufferSizeHints(send,recv); - } - - if( props.containsKey("socket_timeout")) { - log.trace("AWS client config - socket_timeout: {}", props.getProperty("socket_timeout")); - config.setSocketTimeout(Integer.parseInt(props.getProperty("socket_timeout"))); - } - - if( props.containsKey("user_agent")) { - log.trace("AWS client config - user_agent: {}", props.getProperty("user_agent")); - config.setUserAgent(props.getProperty("user_agent")); - } - - return config; - } - - // ~~ - protected S3FileSystem createFileSystem(URI uri, AwsConfig awsConfig) { // try to load amazon props Properties props = loadAmazonProperties(); // add properties for legacy compatibility props.putAll(awsConfig.getS3LegacyProperties()); - S3Client client; - ClientConfiguration clientConfig = createClientConfig(props); - final String bucketName = S3Path.bucketName(uri); // do not use `global` flag for custom endpoint because // when enabling that flag, it overrides S3 endpoints with AWS global endpoint // see https://github.com/nextflow-io/nextflow/pull/5779 final boolean global = bucketName!=null && !awsConfig.getS3Config().isCustomEndpoint(); - final AwsClientFactory factory = new AwsClientFactory(awsConfig, globalRegion(awsConfig)); - client = new S3Client(factory.getS3Client(clientConfig, global)); + final AwsClientFactory factory = new AwsClientFactory(awsConfig, awsConfig.getS3GlobalRegion()); + final S3Client client = new S3Client(factory, props, global); // set the client acl - client.setCannedAcl(getProp(props, "s_3_acl", "s3_acl", "s3Acl")); + client.setCannedAcl(getProp(props, "s_3_acl", "s3_acl", "s3acl", "s3Acl")); client.setStorageEncryption(props.getProperty("storage_encryption")); client.setKmsKeyId(props.getProperty("storage_kms_key_id")); 
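The deleted createClientConfig() helper has no one-to-one SDK v2 replacement: ClientConfiguration is gone, and the new S3SyncClientConfiguration helper referenced above is not part of this diff. Assuming it maps the same legacy properties onto the v2 builders, the equivalent knobs generally live on the HTTP client builder and ClientOverrideConfiguration, roughly as follows (a sketch under that assumption; the timeout and retry values are illustrative):

import java.time.Duration;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.s3.S3Client;

class V2ClientConfigSketch {
    static S3Client build() {
        return S3Client.builder()
                // connection_timeout / socket_timeout / max_connections move to the HTTP client
                .httpClientBuilder(ApacheHttpClient.builder()
                        .connectionTimeout(Duration.ofSeconds(10))
                        .socketTimeout(Duration.ofSeconds(50))
                        .maxConnections(50))
                // max_error_retry becomes a retry policy on the override configuration
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .retryPolicy(RetryPolicy.builder().numRetries(5).build())
                        .build())
                .build();
    }
}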
client.setUploadChunkSize(props.getProperty("upload_chunk_size")); client.setUploadMaxThreads(props.getProperty("upload_max_threads")); - client.setRequesterPaysEnabled(props.getProperty("requester_pays_enabled")); + client.setRequesterPaysEnabled(props.getProperty("requester_pays")); if( props.getProperty("glacier_auto_retrieval") != null ) log.warn("Glacier auto-retrieval is no longer supported, config option `aws.client.glacierAutoRetrieval` will be ignored"); @@ -857,12 +764,6 @@ protected S3FileSystem createFileSystem(URI uri, AwsConfig awsConfig) { return new S3FileSystem(this, client, uri, props); } - protected String globalRegion(AwsConfig awsConfig) { - return awsConfig.getRegion() != null && awsConfig.getS3Config().isCustomEndpoint() - ? awsConfig.getRegion() - : Regions.US_EAST_1.getName(); - } - protected String getProp(Properties props, String... keys) { for( String k : keys ) { if( props.containsKey(k) ) { @@ -923,10 +824,8 @@ private boolean exists(S3Path path) { * @return AccessControlList * @throws NoSuchFileException if not found the path and any child */ - private AccessControlList getAccessControl(S3Path path) throws NoSuchFileException{ - S3ObjectSummary obj = s3ObjectSummaryLookup.lookup(path); - // check first for file: - return path.getFileSystem().getClient().getObjectAcl(obj.getBucketName(), obj.getKey()); + private AccessControlPolicy getAccessControl(S3Path path) throws NoSuchFileException{ + return path.getFileSystem().getClient().getObjectAcl(path.getBucket(), path.getKey()); } /** @@ -937,5 +836,4 @@ private AccessControlList getAccessControl(S3Path path) throws NoSuchFileExcepti protected Path createTempDir() throws IOException { return Files.createTempDirectory("temp-s3-"); } - } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Iterator.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Iterator.java index 680c444399..b35d2cc36f 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Iterator.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Iterator.java @@ -22,9 +22,7 @@ import java.util.Iterator; import java.util.List; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; +import software.amazon.awssdk.services.s3.model.*; import com.google.common.base.Preconditions; /** @@ -67,57 +65,46 @@ public boolean hasNext() { private Iterator getIterator() { if (it == null) { - List listPath = new ArrayList<>(); + ListObjectsV2Request request = buildRequest(); - // iterator over this list - ObjectListing current = s3FileSystem.getClient().listObjects(buildRequest()); + S3Client s3Client = s3FileSystem.getClient(); - while (current.isTruncated()) { - // parse the elements - parseObjectListing(listPath, current); - // continue - current = s3FileSystem.getClient().listNextBatchOfObjects(current); - } - - parseObjectListing(listPath, current); - - it = listPath.iterator(); + // This automatically handles pagination + it = s3Client.listObjectsV2Paginator(request).stream().flatMap(r -> parseObjectListing(r).stream()).iterator(); } return it; } - private ListObjectsRequest buildRequest(){ + private ListObjectsV2Request buildRequest(){ - ListObjectsRequest request = new ListObjectsRequest(); - request.setBucketName(bucket); - request.setPrefix(key); - request.setMarker(key); - request.setDelimiter("/"); - return request; + return ListObjectsV2Request.builder() + .bucket(bucket) + .prefix(key) + .delimiter("/") 
+ .build(); } /** * add to the listPath the elements at the same level that s3Path - * @param listPath List not null list to add - * @param current ObjectListing to walk + * @param current ListObjectsV2Response to walk */ - private void parseObjectListing(List<S3Path> listPath, ObjectListing current) { - + private List<S3Path> parseObjectListing(ListObjectsV2Response current) { + List<S3Path> listPath = new ArrayList<>(); // add all the objects i.e. the files - for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) { - final String key = objectSummary.getKey(); + for (final S3Object objectSummary : current.contents()) { + final String key = objectSummary.key(); final S3Path path = new S3Path(s3FileSystem, "/" + bucket, key.split("/")); path.setObjectSummary(objectSummary); listPath.add(path); } // add all the common prefixes i.e. the directories - for(final String dir : current.getCommonPrefixes()) { - if( dir.equals("/") ) continue; - listPath.add(new S3Path(s3FileSystem, "/" + bucket, dir)); + for(final CommonPrefix prefix : current.commonPrefixes()) { + if( prefix.prefix().equals("/") ) continue; + listPath.add(new S3Path(s3FileSystem, "/" + bucket, prefix.prefix())); } - + return listPath; } /** diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3OutputStream.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3OutputStream.java index eed9e3cda7..9caf4bbb48 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3OutputStream.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3OutputStream.java @@ -26,7 +26,7 @@ import java.nio.ByteBuffer; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; +import java.util.Base64; import java.util.List; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -35,28 +35,13 @@ import java.util.concurrent.Phaser; import java.util.concurrent.atomic.AtomicInteger; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.ObjectTagging; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.S3ObjectId; -import com.amazonaws.services.s3.model.SSEAlgorithm; -import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.model.Tag; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.util.Base64; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; import nextflow.cloud.aws.nio.util.ByteBufferInputStream; import nextflow.cloud.aws.nio.util.S3MultipartOptions; -import nextflow.util.Duration; -import nextflow.util.ThreadPoolHelper; +import nextflow.cloud.aws.nio.util.S3ObjectId; import nextflow.util.ThreadPoolManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,7 +69,7 @@ public final class
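The paginator used in S3Iterator above replaces the v1 isTruncated()/listNextBatchOfObjects() loop. A minimal standalone sketch of the same pattern, assuming a plain S3Client and placeholder bucket and prefix values:

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;
import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;

class ListingSketch {
    static void listAll(S3Client s3) {
        ListObjectsV2Request request = ListObjectsV2Request.builder()
            .bucket("my-bucket")   // placeholder bucket
            .prefix("data/")       // placeholder prefix
            .delimiter("/")
            .build();
        // the iterable issues follow-up requests lazily as pages are consumed
        ListObjectsV2Iterable pages = s3.listObjectsV2Paginator(request);
        for (S3Object object : pages.contents()) {
            System.out.println(object.key() + " " + object.size());
        }
    }
}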
S3OutputStream extends OutputStream { /** * Amazon S3 API implementation to use. */ - private final AmazonS3 s3; + private final S3Client s3; /** * ID of the S3 object to store data into. @@ -96,7 +81,7 @@ public final class S3OutputStream extends OutputStream { */ private StorageClass storageClass; - private SSEAlgorithm storageEncryption; + private ServerSideEncryption storageEncryption; private String kmsKeyId; @@ -120,7 +105,7 @@ public final class S3OutputStream extends OutputStream { /** * If a multipart upload is in progress, holds the ETags of the uploaded parts, {@code null} otherwise. */ - private Queue partETags; + private Queue completedParts; /** * Holds upload request metadata @@ -157,7 +142,7 @@ public final class S3OutputStream extends OutputStream { private int bufferSize; - private CannedAccessControlList cannedAcl; + private ObjectCannedACL cannedAcl; private List tags; @@ -168,7 +153,7 @@ public final class S3OutputStream extends OutputStream { * No special object metadata or storage class will be attached to the object. * */ - public S3OutputStream(final AmazonS3 s3, S3ObjectId objectId, S3MultipartOptions request) { + public S3OutputStream(final S3Client s3, S3ObjectId objectId, S3MultipartOptions request) { this.s3 = requireNonNull(s3); this.objectId = requireNonNull(objectId); this.request = request; @@ -189,7 +174,7 @@ private ByteBuffer expandBuffer(ByteBuffer byteBuffer) { return expanded; } - public S3OutputStream setCannedAcl(CannedAccessControlList acl) { + public S3OutputStream setCannedAcl(ObjectCannedACL acl) { this.cannedAcl = acl; return this; } @@ -207,7 +192,7 @@ public S3OutputStream setStorageClass(String storageClass) { public S3OutputStream setStorageEncryption(String storageEncryption) { if( storageEncryption!=null ) - this.storageEncryption = SSEAlgorithm.fromString(storageEncryption); + this.storageEncryption = ServerSideEncryption.fromValue(storageEncryption); return this; } @@ -338,13 +323,13 @@ private boolean uploadBuffer(ByteBuffer buf, boolean last) throws IOException { */ private void init() throws IOException { // get the upload id - uploadId = initiateMultipartUpload().getUploadId(); + uploadId = initiateMultipartUpload().uploadId(); if (uploadId == null) { throw new IOException("Failed to get a valid multipart upload ID from Amazon S3"); } // create the executor executor = getOrCreateExecutor(request.getMaxThreads()); - partETags = new LinkedBlockingQueue<>(); + completedParts = new LinkedBlockingQueue<>(); phaser = new Phaser(); phaser.register(); log.trace("[S3 phaser] Register - Starting S3 upload: {}; chunk-size: {}; max-threads: {}", uploadId, bufferSize, request.getMaxThreads()); @@ -420,43 +405,40 @@ public void close() throws IOException { /** * Starts the multipart upload process * - * @return An instance of {@link InitiateMultipartUploadResult} + * @return An instance of {@link CreateMultipartUploadResponse} * @throws IOException */ - private InitiateMultipartUploadResult initiateMultipartUpload() throws IOException { - final InitiateMultipartUploadRequest request = // - new InitiateMultipartUploadRequest(objectId.getBucket(), objectId.getKey()); - final ObjectMetadata metadata = new ObjectMetadata(); + private CreateMultipartUploadResponse initiateMultipartUpload() throws IOException { + final CreateMultipartUploadRequest.Builder reqBuilder = // + CreateMultipartUploadRequest.builder().bucket(objectId.bucket()).key(objectId.key()); if (storageClass != null) { - request.setStorageClass(storageClass); + 
reqBuilder.storageClass(storageClass); } if( cannedAcl != null ) { - request.withCannedACL(cannedAcl); + reqBuilder.acl(cannedAcl); } if( kmsKeyId !=null ) { - request.withSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( storageEncryption != null ) { - metadata.setSSEAlgorithm(storageEncryption.toString()); - request.setObjectMetadata(metadata); + reqBuilder.serverSideEncryption(storageEncryption); } if( contentType != null ) { - metadata.setContentType(contentType); - request.setObjectMetadata(metadata); + reqBuilder.contentType(contentType); } - + final CreateMultipartUploadRequest request = reqBuilder.build(); if( log.isTraceEnabled() ) { log.trace("S3 initiateMultipartUpload {}", request); } try { - return s3.initiateMultipartUpload(request); - } catch (final AmazonClientException e) { + return s3.createMultipartUpload(request); + } catch (final SdkException e) { throw new IOException("Failed to initiate Amazon S3 multipart upload", e); } } @@ -486,7 +468,7 @@ private void uploadPart( final ByteBuffer buf, final byte[] checksum, final int uploadPart( new ByteBufferInputStream(buf), len, checksum , partNumber, lastPart ); success=true; } - catch (AmazonClientException | IOException e) { + catch (SdkException | IOException e) { if( attempt == request.getMaxAttempts() ) throw new IOException("Failed to upload multipart data to Amazon S3", e); @@ -511,19 +493,20 @@ private void uploadPart(final InputStream content, final long contentLength, fin if (aborted) return; - final UploadPartRequest request = new UploadPartRequest(); - request.setBucketName(objectId.getBucket()); - request.setKey(objectId.getKey()); - request.setUploadId(uploadId); - request.setPartNumber(partNumber); - request.setPartSize(contentLength); - request.setInputStream(content); - request.setLastPart(lastPart); - request.setMd5Digest(Base64.encodeAsString(checksum)); - - final PartETag partETag = s3.uploadPart(request).getPartETag(); - log.trace("Uploaded part {} with length {} for {}: {}", partETag.getPartNumber(), contentLength, objectId, partETag.getETag()); - partETags.add(partETag); + final UploadPartRequest.Builder reqBuilder = UploadPartRequest.builder(); + reqBuilder.bucket(objectId.bucket()); + reqBuilder.key(objectId.key()); + reqBuilder.uploadId(uploadId); + reqBuilder.partNumber(partNumber); + reqBuilder.contentLength(contentLength); + reqBuilder.contentMD5(Base64.getEncoder().encodeToString(checksum)); + + final UploadPartResponse resp = s3.uploadPart(reqBuilder.build(), RequestBody.fromInputStream(content, contentLength)); + log.trace("Uploaded part {} with length {} for {}: {}", partNumber, contentLength, objectId, resp.eTag()); + completedParts.add(CompletedPart.builder() + .partNumber(partNumber) + .eTag(resp.eTag()) + .build()); } @@ -544,9 +527,9 @@ private synchronized void abortMultipartUpload() { log.debug("Aborting multipart upload {} for {}", uploadId, objectId); try { - s3.abortMultipartUpload(new AbortMultipartUploadRequest(objectId.getBucket(), objectId.getKey(), uploadId)); + s3.abortMultipartUpload(AbortMultipartUploadRequest.builder().bucket(objectId.bucket()).key(objectId.key()).uploadId(uploadId).build()); } - catch (final AmazonClientException e) { + catch (final SdkException e) { log.warn("Failed to abort multipart upload {}: {}", uploadId, e.getMessage()); } aborted = true; @@ -562,20 +545,28 @@ private void completeMultipartUpload() throws IOException { // if aborted upload just ignore it if( aborted ) return; - final int 
partCount = partETags.size(); + final int partCount = completedParts.size(); log.trace("Completing upload to {} consisting of {} parts", objectId, partCount); try { - s3.completeMultipartUpload(new CompleteMultipartUploadRequest( // - objectId.getBucket(), objectId.getKey(), uploadId, new ArrayList<>(partETags))); - } catch (final AmazonClientException e) { + final CompletedMultipartUpload completedUpload = CompletedMultipartUpload.builder() + .parts(completedParts) + .build(); + + s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder() + .bucket(objectId.bucket()) + .key(objectId.key()) + .uploadId(uploadId) + .multipartUpload(completedUpload) + .build()); + } catch (final SdkException e) { throw new IOException("Failed to complete Amazon S3 multipart upload", e); } log.trace("Completed upload to {} consisting of {} parts", objectId, partCount); uploadId = null; - partETags = null; + completedParts = null; } /** @@ -598,43 +589,42 @@ private void putObject(ByteBuffer buf, byte[] checksum) throws IOException { * @throws IOException */ private void putObject(final InputStream content, final long contentLength, byte[] checksum) throws IOException { - - final ObjectMetadata meta = new ObjectMetadata(); - meta.setContentLength(contentLength); - meta.setContentMD5( Base64.encodeAsString(checksum) ); - - final PutObjectRequest request = new PutObjectRequest(objectId.getBucket(), objectId.getKey(), content, meta); + final PutObjectRequest.Builder reqBuilder = PutObjectRequest.builder(); + reqBuilder.bucket(objectId.bucket()); + reqBuilder.key(objectId.key()); + reqBuilder.contentLength(contentLength); + reqBuilder.contentMD5( Base64.getEncoder().encodeToString(checksum) ); if( cannedAcl!=null ) { - request.withCannedAcl(cannedAcl); + reqBuilder.acl(cannedAcl); } if (storageClass != null) { - request.setStorageClass(storageClass); + reqBuilder.storageClass(storageClass); } if( tags!=null && tags.size()>0 ) { - request.setTagging( new ObjectTagging(tags) ); + reqBuilder.tagging(Tagging.builder().tagSet(tags).build() ); } if( kmsKeyId !=null ) { - request.withSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); + reqBuilder.ssekmsKeyId(kmsKeyId); } if( storageEncryption != null ) { - meta.setSSEAlgorithm( storageEncryption.toString() ); + reqBuilder.serverSideEncryption( storageEncryption ); } if( contentType != null ) { - meta.setContentType(contentType); + reqBuilder.contentType(contentType); } - + PutObjectRequest request = reqBuilder.build(); if( log.isTraceEnabled() ) { log.trace("S3 putObject {}", request); } try { - s3.putObject(request); - } catch (final AmazonClientException e) { + s3.putObject(request, RequestBody.fromInputStream(content, contentLength)); + } catch (final SdkException e) { throw new IOException("Failed to put data into Amazon S3 object", e); } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Path.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Path.java index 2a5e193b8c..5030541ff6 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Path.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/S3Path.java @@ -32,9 +32,9 @@ import java.util.Map; import javax.annotation.Nullable; -import com.amazonaws.services.s3.model.S3ObjectId; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.s3.model.Tag; +import nextflow.cloud.aws.nio.util.S3ObjectId; +import software.amazon.awssdk.services.s3.model.S3Object; +import software.amazon.awssdk.services.s3.model.Tag; import 
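The S3OutputStream changes above follow the standard v2 multipart lifecycle: create the upload, upload parts while collecting CompletedPart ETags, then complete with a CompletedMultipartUpload, aborting on failure. A condensed single-part sketch of that flow, with placeholder bucket and key names:

import java.util.ArrayList;
import java.util.List;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;

class MultipartSketch {
    static void upload(S3Client s3, byte[] data) {
        CreateMultipartUploadResponse created = s3.createMultipartUpload(
            CreateMultipartUploadRequest.builder().bucket("my-bucket").key("big.bin").build());
        String uploadId = created.uploadId();
        List<CompletedPart> parts = new ArrayList<>();
        try {
            // upload a single part and record its ETag
            UploadPartResponse part = s3.uploadPart(
                UploadPartRequest.builder()
                    .bucket("my-bucket").key("big.bin")
                    .uploadId(uploadId).partNumber(1)
                    .contentLength((long) data.length)
                    .build(),
                RequestBody.fromBytes(data));
            parts.add(CompletedPart.builder().partNumber(1).eTag(part.eTag()).build());
            s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket("my-bucket").key("big.bin").uploadId(uploadId)
                .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build())
                .build());
        }
        catch (SdkException e) {
            // a failed upload must be aborted to avoid orphaned parts
            s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                .bucket("my-bucket").key("big.bin").uploadId(uploadId).build());
            throw e;
        }
    }
}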
com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @@ -64,7 +64,7 @@ public class S3Path implements Path, TagAwareFile { */ private S3FileSystem fileSystem; - private S3ObjectSummary objectSummary; + private S3Object object; private Map<String,String> tags; @@ -512,23 +512,23 @@ public int hashCode() { } /** - * This method returns the cached {@link S3ObjectSummary} instance if this path has been created + * This method returns the cached {@link S3Object} instance if this path has been created * while iterating a directory structure by the {@link S3Iterator}. *
* After calling this method the cached object is reset, so any following method invocation will return {@code null}. * This is necessary to discard the object meta-data and force to reload file attributes when required. * - * @return The cached {@link S3ObjectSummary} for this path if any. + * @return The cached {@link S3Object} for this path if any. */ - public S3ObjectSummary fetchObjectSummary() { - S3ObjectSummary result = objectSummary; - objectSummary = null; + public S3Object fetchObject() { + S3Object result = object; + object = null; return result; } // note: package scope to limit the access to this setter - void setObjectSummary(S3ObjectSummary objectSummary) { - this.objectSummary = objectSummary; + void setObjectSummary(S3Object objectSummary) { + this.object = objectSummary; } @Override @@ -553,7 +553,7 @@ public List getTagsList() { // create a list of Tag out of the Map List result = new ArrayList<>(); for( Map.Entry entry : tags.entrySet()) { - result.add( new Tag(entry.getKey(), entry.getValue()) ); + result.add( Tag.builder().key(entry.getKey()).value(entry.getValue()).build() ); } return result; } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/ng/S3ParallelDownload.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/ng/S3ParallelDownload.java deleted file mode 100644 index af223dda59..0000000000 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/ng/S3ParallelDownload.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2020-2022, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package nextflow.cloud.aws.nio.ng; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InterruptedIOException; -import java.net.SocketException; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3Object; -import dev.failsafe.Failsafe; -import dev.failsafe.RetryPolicy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implements a multipart downloader for S3 - * - * @author Paolo Di Tommaso - */ -@Deprecated -public class S3ParallelDownload { - - static final private Logger log = LoggerFactory.getLogger(S3ParallelDownload.class); - - private final AmazonS3 s3Client; - private ExecutorService executor; - private ChunkBufferFactory bufferFactory; - private static List instances = new ArrayList<>(10); - private final DownloadOpts opts; - - S3ParallelDownload(AmazonS3 client) { - this(client, new DownloadOpts()); - } - - S3ParallelDownload(AmazonS3 client, DownloadOpts opts) { - if( opts.chunkSize() > opts.bufferMaxSize().toBytes() ) { - String msg = String.format("S3 download chunk size cannot be greater than download max buffer size - offending values chunk size=%s, buffer size=%s", opts.chunkSizeMem(), opts.bufferMaxSize()); - throw new IllegalArgumentException(msg); - } - this.s3Client = client; - this.opts = opts; - this.executor = Executors.newFixedThreadPool(opts.numWorkers(), CustomThreadFactory.withName("S3-download")); - int poolCapacity = (int)Math.ceil((float)opts.bufferMaxSize().toBytes() / opts.chunkSize()); - this.bufferFactory = new ChunkBufferFactory(opts.chunkSize(), poolCapacity); - log.debug("Creating S3 download thread pool: {}; pool-capacity={}", opts, poolCapacity); - } - - static public S3ParallelDownload create(AmazonS3 client, DownloadOpts opts) { - S3ParallelDownload result = new S3ParallelDownload(client, opts); - instances.add(result); - return result; - } - - private void shutdown0(boolean hard) { - if( hard ) - executor.shutdownNow(); - else - executor.shutdown(); - try { - executor.awaitTermination(1, TimeUnit.HOURS); - } - catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - static public void shutdown(boolean hard) { - log.debug("Shutdown S3 downloader"); - for( S3ParallelDownload it : instances ) { - it.shutdown0(hard); - } - log.debug("Shutdown S3 downloader - done"); - } - - protected List prepareGetPartRequests(String bucketName, String key) { - // Use range to download in parallel - long size = s3Client.getObjectMetadata(bucketName, key).getContentLength(); - int numberOfParts = (int) Math.ceil((double) size / opts.chunkSize()); - List result = new ArrayList<>(numberOfParts); - for( int index=0; index size ? 
size - 1 : (long)(index + 1) * opts.chunkSize() - 1; - result.add( new GetObjectRequest(bucketName, key).withRange(x, y) ); - } - return result; - } - - public InputStream download(String bucketName, String key) { - List parts = prepareGetPartRequests(bucketName, key); - Function task = this::safeDownload; - FutureIterator itr = new FutureIterator<>(parts, task, executor, opts.numWorkers() * 2); - return new FutureInputStream(itr); - } - - private ChunkBuffer safeDownload(final GetObjectRequest req) { - RetryPolicy retryPolicy = RetryPolicy.builder() - .handle(SocketException.class) - .withBackoff(50, opts.maxDelayMillis(), ChronoUnit.MILLIS) - .withMaxAttempts(opts.maxAttempts()) - .onFailedAttempt(e -> log.error(String.format("Failed to download chunk #%s file s3://%s/%s", req.getPartNumber(), req.getBucketName(), req.getKey()), e.getLastFailure())) - .build(); - - return Failsafe.with(retryPolicy).get(() -> doDownload(req)); - } - - private ChunkBuffer doDownload(final GetObjectRequest req) throws IOException { - try (S3Object chunk = s3Client.getObject(req)) { - final long start = req.getRange()[0]; - final long end = req.getRange()[1]; - final String path = "s3://" + req.getBucketName() + '/' + req.getKey(); - - ChunkBuffer result = bufferFactory.create(); - try (InputStream stream = chunk.getObjectContent()) { - result.fill(stream); - } - catch (Throwable e) { - String msg = String.format("Failed to download chunk range=%s..%s; path=%s", start, end, path); - throw new IOException(msg, e); - } - log.trace("Downloaded chunk range={}..{}; path={}", start, end, path); - // return it - result.makeReadable(); - return result; - } - catch (InterruptedException e) { - throw new InterruptedIOException(); - } - } -} diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3AsyncClientConfiguration.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3AsyncClientConfiguration.java new file mode 100644 index 0000000000..1ac41a712b --- /dev/null +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3AsyncClientConfiguration.java @@ -0,0 +1,105 @@ +/* + * Copyright 2020-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
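The hand-rolled parallel downloader deleted above is superseded by the SDK's own transfer facilities; the build now declares s3-transfer-manager and the CRT client. A sketch of a concurrent download through the transfer manager, assuming default credentials and placeholder paths (illustrative only, not necessarily how the plugin wires it):

import java.nio.file.Paths;
import software.amazon.awssdk.transfer.s3.S3TransferManager;
import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest;
import software.amazon.awssdk.transfer.s3.model.FileDownload;

class DownloadSketch {
    static void fetch() {
        // S3TransferManager prefers the CRT-based async client when it is on the classpath
        try (S3TransferManager tm = S3TransferManager.create()) {
            FileDownload download = tm.downloadFile(DownloadFileRequest.builder()
                .getObjectRequest(req -> req.bucket("my-bucket").key("big.bin"))  // placeholder object
                .destination(Paths.get("/tmp/big.bin"))                            // placeholder target
                .build());
            download.completionFuture().join();  // block until the transfer ends
        }
    }
}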
+ * + */ +package nextflow.cloud.aws.nio.util; + +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.http.crt.ProxyConfiguration; + +import java.time.Duration; +import java.util.Properties; + +/** + * Class to convert Amazon properties in S3 asynchronous client configuration + * + * @author Jorge Ejarque + */ +public class S3AsyncClientConfiguration extends S3ClientConfiguration{ + + private AwsCrtAsyncHttpClient.Builder httpClientBuilder; + + private AwsCrtAsyncHttpClient.Builder httpClientBuilder(){ + if( this.httpClientBuilder == null) + this.httpClientBuilder = AwsCrtAsyncHttpClient.builder(); + return this.httpClientBuilder; + } + + public SdkAsyncHttpClient.Builder getHttpClientBuilder(){ + if ( this.httpClientBuilder == null ) + return null; + return this.httpClientBuilder; + } + + private S3AsyncClientConfiguration(){ + super(); + } + + + private void setHttpClientBuilder(Properties props){ + if( props.containsKey("connection_timeout") ) { + log.trace("AWS client config - connection_timeout: {}", props.getProperty("connection_timeout")); + httpClientBuilder().connectionTimeout(Duration.ofMillis(Long.parseLong(props.getProperty("connection_timeout")))); + } + + if( props.containsKey("max_connections")) { + log.trace("AWS client config - max_connections: {}", props.getProperty("max_connections")); + httpClientBuilder().maxConcurrency(Integer.parseInt(props.getProperty("max_connections"))); + } + + if( props.containsKey("socket_timeout")) { + log.warn("AWS client config - 'socket_timeout' doesn't exist in AWS SDK V2 Async Client"); + } + + if( props.containsKey("proxy_host")) { + final String host = props.getProperty("proxy_host"); + final ProxyConfiguration.Builder proxyConfig = ProxyConfiguration.builder(); + log.trace("AWS client config - proxy host {}", host); + proxyConfig.host(host); + if (props.containsKey("proxy_port")) { + proxyConfig.port(Integer.parseInt(props.getProperty("proxy_port"))); + } + if (props.containsKey("proxy_username")) { + proxyConfig.username(props.getProperty("proxy_username")); + } + if (props.containsKey("proxy_password")) { + proxyConfig.password(props.getProperty("proxy_password")); + } + if (props.containsKey("proxy_scheme")) { + proxyConfig.scheme(props.getProperty("proxy_scheme")); + } + if (props.containsKey("proxy_domain")) { + log.warn("AWS client config 'proxy_domain' doesn't exist in AWS SDK V2 Async Client"); + } + if (props.containsKey("proxy_workstation")) { + log.warn("AWS client config 'proxy_workstation' doesn't exist in AWS SDK V2 Async Client"); + } + httpClientBuilder().proxyConfiguration(proxyConfig.build()); + } + + } + + public static S3AsyncClientConfiguration create(Properties props) { + S3AsyncClientConfiguration config = new S3AsyncClientConfiguration(); + + if( props != null ){ + config.setClientOverrideConfiguration(props); + config.setHttpClientBuilder(props); + } + return config; + } +} + diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ClientConfiguration.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ClientConfiguration.java new file mode 100644 index 0000000000..2ecb48d79b --- /dev/null +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ClientConfiguration.java @@ -0,0 +1,96 @@ +/* + * Copyright 2020-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
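S3AsyncClientConfiguration above maps Nextflow's aws.client properties onto an AwsCrtAsyncHttpClient builder. A minimal sketch of the resulting configuration, with placeholder values standing in for the parsed properties:

import java.time.Duration;
import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient;
import software.amazon.awssdk.http.crt.ProxyConfiguration;

class AsyncHttpSketch {
    static AwsCrtAsyncHttpClient.Builder build() {
        ProxyConfiguration proxy = ProxyConfiguration.builder()
            .host("proxy.example.com")  // placeholder, from proxy_host
            .port(8080)                 // placeholder, from proxy_port
            .scheme("http")             // placeholder, from proxy_scheme
            .build();
        return AwsCrtAsyncHttpClient.builder()
            .connectionTimeout(Duration.ofMillis(10_000))  // from connection_timeout
            .maxConcurrency(50)                            // from max_connections
            .proxyConfiguration(proxy);
    }
}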
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package nextflow.cloud.aws.nio.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.retries.StandardRetryStrategy; + +import java.util.Properties; + +/** + * Class to convert Amazon properties in S3 client override configuration + * + * @author Jorge Ejarque + */ +public class S3ClientConfiguration { + + protected static final Logger log = LoggerFactory.getLogger(S3ClientConfiguration.class); + + private ClientOverrideConfiguration.Builder cocBuilder; + + private ClientOverrideConfiguration.Builder cocBuilder(){ + if( this.cocBuilder == null ) + this.cocBuilder = ClientOverrideConfiguration.builder(); + return this.cocBuilder; + } + + public ClientOverrideConfiguration getClientOverrideConfiguration(){ + if( cocBuilder == null ) + return null; + return cocBuilder.build(); + } + + protected S3ClientConfiguration(){} + + + protected final void setClientOverrideConfiguration(Properties props) { + if( props == null ) + return; + + if( props.containsKey("max_error_retry")) { + log.trace("AWS client config - max_error_retry: {}", props.getProperty("max_error_retry")); + cocBuilder().retryStrategy(StandardRetryStrategy.builder().maxAttempts((Integer.parseInt(props.getProperty("max_error_retry")) + 1 )).build()); + } + + if( props.containsKey("protocol")) { + log.warn("AWS client config 'protocol' doesn't exist in AWS SDK V2"); + } + + if ( props.containsKey("signer_override")) { + log.warn("AWS client config - 'signerOverride' is deprecated"); + cocBuilder().putAdvancedOption(SdkAdvancedClientOption.SIGNER, resolveSigner(props.getProperty("signer_override"))); + } + + if( props.containsKey("socket_send_buffer_size_hints") || props.containsKey("socket_recv_buffer_size_hints") ) { + log.warn("AWS client config - 'socket_send_buffer_size_hints' and 'socket_recv_buffer_size_hints' do not exist in AWS SDK V2" ); + } + + if( props.containsKey("user_agent")) { + log.trace("AWS client config - user_agent: {}", props.getProperty("user_agent")); + cocBuilder().putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX, props.getProperty("user_agent")); + } + } + + private static Signer resolveSigner(String signerOverride) { + switch (signerOverride) { + case "AWSS3V4SignerType": + case "S3SignerType": + return AwsS3V4Signer.create(); + case "AWS4SignerType": + return Aws4Signer.create(); + default: + throw new IllegalArgumentException("Unsupported signer: " + signerOverride); + } +} +} + diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3CopyStream.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3CopyStream.java deleted file mode 100644 index d927f6519f..0000000000 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3CopyStream.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * 
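One subtlety in setClientOverrideConfiguration above: v1's max_error_retry counted retries after the first call, while v2 retry strategies count total attempts, hence the +1. A sketch of that mapping in isolation:

import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.retries.StandardRetryStrategy;

class RetrySketch {
    static ClientOverrideConfiguration forMaxErrorRetry(int maxErrorRetry) {
        // e.g. max_error_retry=3 allows one initial attempt plus three retries
        return ClientOverrideConfiguration.builder()
            .retryStrategy(StandardRetryStrategy.builder()
                .maxAttempts(maxErrorRetry + 1)
                .build())
            .build();
    }
}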
Copyright 2020-2022, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package nextflow.cloud.aws.nio.util; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.List; - -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.ObjectTagging; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.S3ObjectId; -import com.amazonaws.services.s3.model.SSEAlgorithm; -import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; -import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.model.Tag; -import com.amazonaws.util.Base64; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import static java.util.Objects.requireNonNull; - -/** - * Parallel S3 multipart uploader. Based on the following code request - * See https://github.com/Upplication/Amazon-S3-FileSystem-NIO2/pulls - * - * @author Paolo Di Tommaso - * @author Tom Wieczorek - */ - -public final class S3CopyStream extends OutputStream { - - private static final Logger log = LoggerFactory.getLogger(S3CopyStream.class); - - /** - * Amazon S3 API implementation to use. - */ - private final AmazonS3 s3; - - /** - * ID of the S3 object to store data into. - */ - private final S3ObjectId objectId; - - /** - * Amazon S3 storage class to apply to the newly created S3 object, if any. - */ - private StorageClass storageClass; - - private SSEAlgorithm storageEncryption; - - private String kmsKeyId; - - /** - * Indicates if the stream has been closed. - */ - private volatile boolean closed; - - /** - * Indicates if the upload has been aborted - */ - private volatile boolean aborted; - - private MessageDigest md5; - - private CannedAccessControlList cannedAcl; - - private List tags; - - private CopyOutputStream buffer; - - /** - * Creates a new {@code S3OutputStream} that writes data directly into the S3 object with the given {@code objectId}. - * No special object metadata or storage class will be attached to the object. 
- * - */ - public S3CopyStream(final AmazonS3 s3, S3ObjectId objectId) { - this.s3 = requireNonNull(s3); - this.objectId = requireNonNull(objectId); - this.md5 = createMd5(); - this.buffer = new CopyOutputStream(); - } - - public S3CopyStream setCannedAcl(CannedAccessControlList acl) { - this.cannedAcl = acl; - return this; - } - - public S3CopyStream setTags(List tags) { - this.tags = tags; - return this; - } - - public S3CopyStream setStorageClass(String storageClass) { - if( storageClass!=null ) - this.storageClass = StorageClass.fromValue(storageClass); - return this; - } - - public S3CopyStream setStorageEncryption(String storageEncryption) { - if( storageEncryption!=null ) - this.storageEncryption = SSEAlgorithm.fromString(storageEncryption); - return this; - } - - public S3CopyStream setKmsKeyId(String kmsKeyId) { - this.kmsKeyId = kmsKeyId; - return this; - } - - /** - * @return A MD5 message digester - */ - private MessageDigest createMd5() { - try { - return MessageDigest.getInstance("MD5"); - } - catch(NoSuchAlgorithmException e) { - throw new IllegalStateException("Cannot find a MD5 algorithm provider",e); - } - } - - public void write(byte b[], int off, int len) throws IOException { - if( closed ){ - throw new IOException("Can't write into a closed stream"); - } - buffer.write(b,off,len); - md5.update(b,off,len); - } - - /** - * Writes a byte into the uploader buffer. When it is full starts the upload process - * in a asynchronous manner - * - * @param b The byte to be written - * @throws IOException - */ - @Override - public void write (int b) throws IOException { - if( closed ){ - throw new IOException("Can't write into a closed stream"); - } - buffer.write((byte) b); - md5.update((byte) b); - } - - - /** - * Close the stream uploading any remaining buffered data - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (closed) { - return; - } - - putObject(buffer.toInputStream(), buffer.size(), md5.digest()); - closed = true; - } - - /** - * Stores the given buffer using a single-part upload process - * - * @param contentLength - * @param content - * @throws IOException - */ - private void putObject(final InputStream content, final long contentLength, byte[] checksum) throws IOException { - - final ObjectMetadata meta = new ObjectMetadata(); - meta.setContentLength(contentLength); - meta.setContentMD5( Base64.encodeAsString(checksum) ); - - final PutObjectRequest request = new PutObjectRequest(objectId.getBucket(), objectId.getKey(), content, meta); - if( cannedAcl!=null ) { - request.withCannedAcl(cannedAcl); - } - - if (storageClass != null) { - request.setStorageClass(storageClass); - } - - if( tags!=null && tags.size()>0 ) { - request.setTagging( new ObjectTagging(tags) ); - } - - if( kmsKeyId !=null ) { - request.withSSEAwsKeyManagementParams( new SSEAwsKeyManagementParams(kmsKeyId) ); - } - - if( storageEncryption != null ) { - meta.setSSEAlgorithm( storageEncryption.toString() ); - } - - if( log.isTraceEnabled() ) { - log.trace("S3 putObject {}", request); - } - - try { - s3.putObject(request); - } catch (final AmazonClientException e) { - throw new IOException("Failed to put data into Amazon S3 object", e); - } - } - -} diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/CopyOutputStream.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectId.java similarity index 50% rename from plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/CopyOutputStream.java rename to 
plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectId.java index 293f4dcda3..34eda89245 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/CopyOutputStream.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectId.java @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022, Seqera Labs + * Copyright 2020-2025, Seqera Labs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,19 +17,33 @@ package nextflow.cloud.aws.nio.util; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.InputStream; - /** - * https://stackoverflow.com/a/31809148/395921 - * - * @author Paolo Di Tommaso + * Class that mimics the AWS SDK v1 S3ObjectId */ -public class CopyOutputStream extends ByteArrayOutputStream { +public class S3ObjectId { + private final String bucket; + private final String key; + private final String versionId; + + public S3ObjectId(String bucket, String key, String versionId) { + this.bucket = bucket; + this.key = key; + this.versionId = versionId; + } + + public S3ObjectId(String bucket, String key) { + this(bucket, key, null); + } + + public String bucket() { + return bucket; + } + + public String key() { + return key; + } - //Creates InputStream without actually copying the buffer and using up mem for that. - public InputStream toInputStream(){ - return new ByteArrayInputStream(buf, 0, count); + public String versionId() { + return versionId; } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectSummaryLookup.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectSummaryLookup.java index ce1e0e75cd..306208d5c1 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectSummaryLookup.java +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3ObjectSummaryLookup.java @@ -17,37 +17,31 @@ package nextflow.cloud.aws.nio.util; -import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.List; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import nextflow.cloud.aws.nio.S3Client; +import software.amazon.awssdk.services.s3.model.*; import nextflow.cloud.aws.nio.S3Path; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class S3ObjectSummaryLookup { - private static final Logger log = LoggerFactory.getLogger(S3ObjectSummary.class); + private static final Logger log = LoggerFactory.getLogger(S3Object.class); /** - * Get the {@link com.amazonaws.services.s3.model.S3ObjectSummary} that represent this Path or its first child if the path does not exist + * Get the {@link software.amazon.awssdk.services.s3.model.S3Object} that represents this Path or its first child if the path does not exist * @param s3Path {@link S3Path} - * @return {@link com.amazonaws.services.s3.model.S3ObjectSummary} + * @return {@link software.amazon.awssdk.services.s3.model.S3Object} * @throws java.nio.file.NoSuchFileException if not found the path and any child */ - public S3ObjectSummary lookup(S3Path s3Path) throws NoSuchFileException { /* * check is object summary has been cached */ - S3ObjectSummary summary =
s3Path.fetchObjectSummary(); + S3Object summary = s3Path.fetchObject(); if( summary != null ) { return summary; } @@ -58,16 +52,17 @@ public S3ObjectSummary lookup(S3Path s3Path) throws NoSuchFileException { * when `key` is an empty string retrieve the object meta-data of the bucket */ if( "".equals(s3Path.getKey()) ) { - ObjectMetadata meta = client.getObjectMetadata(s3Path.getBucket(), ""); + HeadObjectResponse meta = client.getObjectMetadata(s3Path.getBucket(), ""); if( meta == null ) throw new NoSuchFileException("s3://" + s3Path.getBucket()); - summary = new S3ObjectSummary(); - summary.setBucketName(s3Path.getBucket()); - summary.setETag(meta.getETag()); - summary.setKey(s3Path.getKey()); - summary.setLastModified(meta.getLastModified()); - summary.setSize(meta.getContentLength()); + summary = S3Object.builder() + .eTag(meta.eTag()) + .key(s3Path.getKey()) + .lastModified(meta.lastModified()) + .size(meta.contentLength()) + .build(); + // TODO summary.setOwner(?); // TODO summary.setStorageClass(?); return summary; @@ -79,28 +74,28 @@ public S3ObjectSummary lookup(S3Path s3Path) throws NoSuchFileException { */ String marker = null; while( true ) { - ListObjectsRequest request = new ListObjectsRequest(); - request.setBucketName(s3Path.getBucket()); - request.setPrefix(s3Path.getKey()); - request.setMaxKeys(250); + ListObjectsRequest.Builder request = ListObjectsRequest.builder(); + request.bucket(s3Path.getBucket()); + request.prefix(s3Path.getKey()); + request.maxKeys(250); if( marker != null ) - request.setMarker(marker); + request.marker(marker); - ObjectListing listing = client.listObjects(request); - List results = listing.getObjectSummaries(); + ListObjectsResponse listing = client.listObjects(request.build()); + List results = listing.contents(); if (results.isEmpty()){ break; } - for( S3ObjectSummary item : results ) { + for( S3Object item : results ) { if( matchName(s3Path.getKey(), item)) { return item; } } if( listing.isTruncated() ) - marker = listing.getNextMarker(); + marker = listing.nextMarker(); else break; } @@ -108,8 +103,8 @@ public S3ObjectSummary lookup(S3Path s3Path) throws NoSuchFileException { throw new NoSuchFileException("s3://" + s3Path.getBucket() + "/" + s3Path.getKey()); } - private boolean matchName(String fileName, S3ObjectSummary summary) { - String foundKey = summary.getKey(); + private boolean matchName(String fileName, S3Object summary) { + String foundKey = summary.key(); // they are different names return false if( !foundKey.startsWith(fileName) ) { @@ -123,62 +118,13 @@ private boolean matchName(String fileName, S3ObjectSummary summary) { return foundKey.charAt(fileName.length()) == '/'; } - public ObjectMetadata getS3ObjectMetadata(S3Path s3Path) { + public HeadObjectResponse getS3ObjectMetadata(S3Path s3Path) { S3Client client = s3Path.getFileSystem().getClient(); try { return client.getObjectMetadata(s3Path.getBucket(), s3Path.getKey()); } - catch (AmazonS3Exception e){ - if (e.getStatusCode() != 404){ - throw e; - } - return null; - } - } - - /** - * get S3Object represented by this S3Path try to access with or without end slash '/' - * @param s3Path S3Path - * @return S3Object or null if it does not exist - */ - @Deprecated - private S3Object getS3Object(S3Path s3Path){ - - S3Client client = s3Path.getFileSystem() - .getClient(); - - S3Object object = getS3Object(s3Path.getBucket(), s3Path.getKey(), client); - - if (object != null) { - return object; - } - else{ - return getS3Object(s3Path.getBucket(), s3Path.getKey() + "/", client); - } - 
} - - /** - * get s3Object with S3Object#getObjectContent closed - * @param bucket String bucket - * @param key String key - * @param client S3Client client - * @return S3Object - */ - private S3Object getS3Object(String bucket, String key, S3Client client){ - try { - S3Object object = client .getObject(bucket, key); - if (object.getObjectContent() != null){ - try { - object.getObjectContent().close(); - } - catch (IOException e ) { - log.debug("Error while closing S3Object for bucket: `{}` and key: `{}` -- Cause: {}",bucket, key, e.getMessage()); - } - } - return object; - } - catch (AmazonS3Exception e){ - if (e.getStatusCode() != 404){ + catch (S3Exception e){ + if (e.statusCode() != 404){ throw e; } return null; diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3SyncClientConfiguration.java b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3SyncClientConfiguration.java new file mode 100644 index 0000000000..82ea1a4eb2 --- /dev/null +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/nio/util/S3SyncClientConfiguration.java @@ -0,0 +1,112 @@ +/* + * Copyright 2020-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package nextflow.cloud.aws.nio.util; + +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.apache.ProxyConfiguration; + +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.Properties; + +/** + * Class to convert Amazon properties in S3 synchronous client configuration + * + * @author Jorge Ejarque + */ +public class S3SyncClientConfiguration extends S3ClientConfiguration{ + + private ApacheHttpClient.Builder httpClientBuilder; + + private ApacheHttpClient.Builder httpClientBuilder(){ + if( this.httpClientBuilder == null) + this.httpClientBuilder = ApacheHttpClient.builder(); + return this.httpClientBuilder; + } + + public SdkHttpClient.Builder getHttpClientBuilder(){ + if ( this.httpClientBuilder == null ) + return null; + return this.httpClientBuilder; + } + + private S3SyncClientConfiguration(){ + super(); + } + + private void setClientHttpBuilder(Properties props) { + if( props.containsKey("connection_timeout") ) { + log.trace("AWS client config - connection_timeout: {}", props.getProperty("connection_timeout")); + httpClientBuilder().connectionTimeout(Duration.ofMillis(Long.parseLong(props.getProperty("connection_timeout")))); + } + + if( props.containsKey("max_connections")) { + log.trace("AWS client config - max_connections: {}", props.getProperty("max_connections")); + httpClientBuilder().maxConnections(Integer.parseInt(props.getProperty("max_connections"))); + } + + if( props.containsKey("socket_timeout")) { + log.trace("AWS client config - socket_timeout: {}", props.getProperty("socket_timeout")); + httpClientBuilder().socketTimeout(Duration.ofMillis(Long.parseLong(props.getProperty("socket_timeout")))); + } + + try { + if( props.containsKey("proxy_host")) { + 
final String host = props.getProperty("proxy_host"); + final int port = Integer.parseInt(props.getProperty("proxy_port", "-1")); + final String scheme = props.getProperty("proxy_scheme", "http"); + final ProxyConfiguration.Builder proxyConfig = ProxyConfiguration.builder(); + log.trace("AWS client config - proxy {}://{}:{}", scheme, host, port); + proxyConfig.endpoint(new URI(scheme, null, host, port, null, null, null)); + + if (props.containsKey("proxy_username")) { + proxyConfig.username(props.getProperty("proxy_username")); + } + if (props.containsKey("proxy_password")) { + proxyConfig.password(props.getProperty("proxy_password")); + } + + if (props.containsKey("proxy_domain")) { + proxyConfig.ntlmDomain(props.getProperty("proxy_domain")); + } + if (props.containsKey("proxy_workstation")) { + proxyConfig.ntlmWorkstation(props.getProperty("proxy_workstation")); + } + + httpClientBuilder().proxyConfiguration(proxyConfig.build()); + } + } catch (URISyntaxException e){ + log.warn("Exception creating AWS client config - proxy URI", e); + } + } + + public static S3SyncClientConfiguration create(Properties props) { + S3SyncClientConfiguration config = new S3SyncClientConfiguration(); + + if( props != null ) { + config.setClientOverrideConfiguration(props); + config.setClientHttpBuilder(props); + } + + return config; + } + + +} + diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/AwsHelper.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/AwsHelper.groovy index 443cc41bc9..e61f170d9d 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/AwsHelper.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/AwsHelper.groovy @@ -16,7 +16,7 @@ package nextflow.cloud.aws.util -import com.amazonaws.services.s3.model.CannedAccessControlList +import software.amazon.awssdk.services.s3.model.ObjectCannedACL import com.google.common.base.CaseFormat /** @@ -26,13 +26,13 @@ import com.google.common.base.CaseFormat */ class AwsHelper { - static CannedAccessControlList parseS3Acl(String value) { + static ObjectCannedACL parseS3Acl(String value) { if( !value ) return null return value.contains('-') - ? CannedAccessControlList.valueOf(CaseFormat.LOWER_HYPHEN.to(CaseFormat.UPPER_CAMEL,value)) - : CannedAccessControlList.valueOf(value) + ? 
ObjectCannedACL.valueOf(CaseFormat.LOWER_HYPHEN.to(CaseFormat.UPPER_UNDERSCORE, value)) + : ObjectCannedACL.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE,value)) } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3BashLib.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3BashLib.groovy index 3d2aa346fd..3f5c569ee3 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3BashLib.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3BashLib.groovy @@ -16,12 +16,12 @@ package nextflow.cloud.aws.util -import com.amazonaws.services.s3.model.CannedAccessControlList import groovy.transform.CompileStatic import nextflow.Global import nextflow.Session import nextflow.cloud.aws.batch.AwsOptions import nextflow.executor.BashFunLib +import software.amazon.awssdk.services.s3.model.ObjectCannedACL /** * AWS S3 helper class @@ -79,7 +79,7 @@ class S3BashLib extends BashFunLib { return this } - S3BashLib withAcl(CannedAccessControlList value) { + S3BashLib withAcl(ObjectCannedACL value) { if( value ) this.acl = "--acl $value " return this diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3CredentialsProvider.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3CredentialsProvider.groovy index dd494b6797..5259dd6122 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3CredentialsProvider.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/S3CredentialsProvider.groovy @@ -17,47 +17,44 @@ package nextflow.cloud.aws.util -import com.amazonaws.AmazonClientException -import com.amazonaws.auth.AWSCredentials -import com.amazonaws.auth.AWSCredentialsProvider -import com.amazonaws.auth.AnonymousAWSCredentials +import software.amazon.awssdk.auth.credentials.AwsCredentials +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider import groovy.transform.CompileStatic import groovy.util.logging.Slf4j /** * AWS credentials provider that delegates the credentials to the - * specified provider class and fallback to the {@link AnonymousAWSCredentials} + * specified provider class and fallback to the {@link AnonymousCredentialsProvider} * when no credentials are available. 
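The parseS3Acl change above normalises both of Nextflow's accepted spellings to the underscore form of the v2 enum. The same conversion expressed in Java, for illustration:

import com.google.common.base.CaseFormat;
import software.amazon.awssdk.services.s3.model.ObjectCannedACL;

class AclSketch {
    static ObjectCannedACL parse(String value) {
        // "public-read" -> PUBLIC_READ, "PublicRead" -> PUBLIC_READ
        return value.contains("-")
            ? ObjectCannedACL.valueOf(CaseFormat.LOWER_HYPHEN.to(CaseFormat.UPPER_UNDERSCORE, value))
            : ObjectCannedACL.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, value));
    }
}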
* - * See also {@link com.amazonaws.services.s3.S3CredentialsProviderChain} + * See also {@link software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain} * * @author Paolo Di Tommaso */ @Slf4j @CompileStatic -class S3CredentialsProvider implements AWSCredentialsProvider { +class S3CredentialsProvider implements AwsCredentialsProvider { - private AWSCredentialsProvider target + private AwsCredentialsProvider target - private volatile AWSCredentials anonymous + private volatile AwsCredentials anonymous - S3CredentialsProvider(AWSCredentialsProvider target) { + S3CredentialsProvider(AwsCredentialsProvider target) { this.target = target } @Override - AWSCredentials getCredentials() { - if( anonymous!=null ) + AwsCredentials resolveCredentials() { + if (anonymous != null) { return anonymous + } try { - return target.getCredentials(); - } catch (AmazonClientException e) { - log.debug("No AWS credentials available - falling back to anonymous access"); + return target.resolveCredentials() + } catch (Exception e) { + log.debug("No AWS credentials available - falling back to anonymous access") } - return anonymous=new AnonymousAWSCredentials() + anonymous = AnonymousCredentialsProvider.create().resolveCredentials() + return anonymous } - @Override - void refresh() { - target.refresh() - } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/SsoCredentialsProviderV1.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/SsoCredentialsProviderV1.groovy deleted file mode 100644 index 7f60f45c9a..0000000000 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/util/SsoCredentialsProviderV1.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2013-2024, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package nextflow.cloud.aws.util - -import com.amazonaws.auth.AWSCredentials -import com.amazonaws.auth.AWSCredentialsProvider -import com.amazonaws.auth.BasicAWSCredentials -import com.amazonaws.auth.BasicSessionCredentials -import groovy.transform.CompileStatic -import groovy.util.logging.Slf4j -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials -import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider - -/** - * Adapter for the SSO credentials provider from the SDK v2. 
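The rewritten S3CredentialsProvider above keeps the same fallback contract as before: try the configured delegate, then resolve anonymous credentials. A standalone sketch, with DefaultCredentialsProvider standing in for the delegate:

import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;

class FallbackSketch {
    static AwsCredentials resolve() {
        AwsCredentialsProvider delegate = DefaultCredentialsProvider.create();  // placeholder delegate
        try {
            return delegate.resolveCredentials();
        }
        catch (Exception e) {
            // no credentials found anywhere: fall back to anonymous access
            return AnonymousCredentialsProvider.create().resolveCredentials();
        }
    }
}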
- * - * @author Ben Sherman - */ -@Slf4j -@CompileStatic -class SsoCredentialsProviderV1 implements AWSCredentialsProvider { - - private ProfileCredentialsProvider delegate - - SsoCredentialsProviderV1() { - this.delegate = ProfileCredentialsProvider.create() - } - - SsoCredentialsProviderV1(String profile) { - this.delegate = ProfileCredentialsProvider.create(profile) - } - - @Override - AWSCredentials getCredentials() { - final credentials = delegate.resolveCredentials() - - if( credentials instanceof AwsSessionCredentials ) - new BasicSessionCredentials( - credentials.accessKeyId(), - credentials.secretAccessKey(), - credentials.sessionToken()) - - else - new BasicAWSCredentials( - credentials.accessKeyId(), - credentials.secretAccessKey()) - } - - @Override - void refresh() { - throw new UnsupportedOperationException() - } -} diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchProxyTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchProxyTest.groovy index 04e87ce3e0..4dd6376649 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchProxyTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchProxyTest.groovy @@ -16,11 +16,11 @@ package nextflow.cloud.aws.batch -import com.amazonaws.services.batch.AWSBatchClient -import com.amazonaws.services.batch.model.DescribeJobDefinitionsRequest -import com.amazonaws.services.batch.model.DescribeJobDefinitionsResult -import com.amazonaws.services.batch.model.DescribeJobsRequest -import com.amazonaws.services.batch.model.DescribeJobsResult +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsResponse +import software.amazon.awssdk.services.batch.model.DescribeJobsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobsResponse import nextflow.util.ThrottlingExecutor import spock.lang.Specification /** @@ -32,7 +32,7 @@ class AwsBatchProxyTest extends Specification { def 'should get client instance' () { given: - def client = Mock(AWSBatchClient) + def client = Mock(BatchClient) def exec = Mock(ThrottlingExecutor) when: @@ -52,10 +52,10 @@ class AwsBatchProxyTest extends Specification { def 'should invoke executor with normal priority' () { given: - def client = Mock(AWSBatchClient) + def client = Mock(BatchClient) def exec = Mock(ThrottlingExecutor) - def req = Mock(DescribeJobDefinitionsRequest) - def resp = Mock(DescribeJobDefinitionsResult) + def req = DescribeJobDefinitionsRequest.builder().build() as DescribeJobDefinitionsRequest + def resp = DescribeJobDefinitionsResponse.builder().build() def ZERO = 0 as byte when: @@ -70,10 +70,10 @@ class AwsBatchProxyTest extends Specification { def 'should invoke executor with higher priority' () { given: - def client = Mock(AWSBatchClient) + def client = Mock(BatchClient) def exec = Mock(ThrottlingExecutor) - def req = Mock(DescribeJobsRequest) - def resp = Mock(DescribeJobsResult) + def req = DescribeJobsRequest.builder().build() as DescribeJobsRequest + def resp = DescribeJobsResponse.builder().build() def _10 = 10 as byte when: diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy index da217efce0..ec27521e1a 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy +++ 
b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy @@ -16,24 +16,26 @@ package nextflow.cloud.aws.batch +import software.amazon.awssdk.services.batch.model.DescribeJobsResponse +import software.amazon.awssdk.services.batch.model.ResourceType +import software.amazon.awssdk.services.batch.model.SubmitJobResponse + import java.nio.file.Path import java.time.Instant -import com.amazonaws.services.batch.AWSBatch -import com.amazonaws.services.batch.model.ContainerProperties -import com.amazonaws.services.batch.model.DescribeJobDefinitionsRequest -import com.amazonaws.services.batch.model.DescribeJobDefinitionsResult -import com.amazonaws.services.batch.model.DescribeJobsRequest -import com.amazonaws.services.batch.model.DescribeJobsResult -import com.amazonaws.services.batch.model.EvaluateOnExit -import com.amazonaws.services.batch.model.JobDefinition -import com.amazonaws.services.batch.model.JobDetail -import com.amazonaws.services.batch.model.KeyValuePair -import com.amazonaws.services.batch.model.RegisterJobDefinitionRequest -import com.amazonaws.services.batch.model.RegisterJobDefinitionResult -import com.amazonaws.services.batch.model.RetryStrategy -import com.amazonaws.services.batch.model.SubmitJobRequest -import com.amazonaws.services.batch.model.SubmitJobResult +import software.amazon.awssdk.services.batch.BatchClient +import software.amazon.awssdk.services.batch.model.ContainerProperties +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsRequest +import software.amazon.awssdk.services.batch.model.DescribeJobDefinitionsResponse +import software.amazon.awssdk.services.batch.model.DescribeJobsRequest +import software.amazon.awssdk.services.batch.model.EvaluateOnExit +import software.amazon.awssdk.services.batch.model.JobDefinition +import software.amazon.awssdk.services.batch.model.JobDetail +import software.amazon.awssdk.services.batch.model.KeyValuePair +import software.amazon.awssdk.services.batch.model.RegisterJobDefinitionRequest +import software.amazon.awssdk.services.batch.model.RegisterJobDefinitionResponse +import software.amazon.awssdk.services.batch.model.RetryStrategy +import software.amazon.awssdk.services.batch.model.SubmitJobRequest import nextflow.BuildInfo import nextflow.Global import nextflow.Session @@ -53,6 +55,7 @@ import nextflow.processor.TaskRun import nextflow.processor.TaskStatus import nextflow.script.BaseScript import nextflow.script.ProcessConfig +import nextflow.util.CacheHelper import nextflow.util.MemoryUnit import spock.lang.Specification import spock.lang.Unroll @@ -83,8 +86,8 @@ class AwsBatchTaskHandlerTest extends Specification { def 'should create an aws submit request'() { given: - def VAR_FOO = new KeyValuePair().withName('FOO').withValue('1') - def VAR_BAR = new KeyValuePair().withName('BAR').withValue('2') + def VAR_FOO = KeyValuePair.builder().name('FOO').value('1').build() + def VAR_BAR = KeyValuePair.builder().name('BAR').value('2').build() def task = Mock(TaskRun) task.getName() >> 'batch-task' task.getConfig() >> new TaskConfig(memory: '8GB', cpus: 4, maxRetries: 2, errorStrategy: 'retry') @@ -101,16 +104,17 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' - req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - 
req.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '8192' - req.getContainerOverrides().getEnvironment() == [VAR_FOO, VAR_BAR] - req.getContainerOverrides().getCommand() == ['bash', '-c', 'something'] - req.getRetryStrategy() == new RetryStrategy() - .withAttempts(5) - .withEvaluateOnExit( new EvaluateOnExit().withAction('RETRY').withOnStatusReason('Host EC2*'), new EvaluateOnExit().withOnReason('*').withAction('EXIT') ) + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.VCPU}.value() == '4' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.MEMORY}.value() == '8192' + req.containerOverrides().environment() == [VAR_FOO, VAR_BAR] + req.containerOverrides().command() == ['bash', '-c', 'something'] + req.retryStrategy() == RetryStrategy.builder() + .attempts(5) + .evaluateOnExit( EvaluateOnExit.builder().action('RETRY').onStatusReason('Host EC2*').build(), EvaluateOnExit.builder().onReason('*').action('EXIT').build() ) + .build() when: req = handler.newSubmitRequest(task) @@ -122,14 +126,14 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' - req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - req.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '8192' - req.getContainerOverrides().getEnvironment() == [VAR_FOO, VAR_BAR] - req.getContainerOverrides().getCommand() == ['bash', '-c', 'something'] - req.getRetryStrategy() == null // <-- retry is managed by NF, hence this must be null + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.VCPU}.value() == '4' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.MEMORY}.value() == '8192' + req.containerOverrides().environment() == [VAR_FOO, VAR_BAR] + req.containerOverrides().command() == ['bash', '-c', 'something'] + req.retryStrategy() == null // <-- retry is managed by NF, hence this must be null } @@ -152,12 +156,12 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [] - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' - req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - req.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '8192' - req.getContainerOverrides().getCommand() == ['bash', '-c', 'something'] + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.VCPU}.value() == '4' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.MEMORY}.value() == '8192' + req.containerOverrides().command() == ['bash', '-c', 'something'] when: def req2 = handler.newSubmitRequest(task) @@ -169,14 +173,14 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * 
handler.getEnvironmentVars() >> [] - req2.getJobName() == 'batch-task' - req2.getJobQueue() == 'queue1' - req2.getJobDefinition() == 'job-def:1' - req2.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - req2.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '8192' - req2.getContainerOverrides().getCommand() ==['bash', '-c', 'something'] - req2.getShareIdentifier() == 'priority/high' - req2.getSchedulingPriorityOverride() == 9999 + req2.jobName() == 'batch-task' + req2.jobQueue() == 'queue1' + req2.jobDefinition() == 'job-def:1' + req2.containerOverrides().resourceRequirements().find { it.type() == ResourceType.VCPU}.value() == '4' + req2.containerOverrides().resourceRequirements().find { it.type() == ResourceType.MEMORY}.value() == '8192' + req2.containerOverrides().command() ==['bash', '-c', 'something'] + req2.shareIdentifier() == 'priority/high' + req2.schedulingPriorityOverride() == 9999 } @@ -203,12 +207,12 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue1' 1 * handler.getJobDefinition(task) >> 'job-def:1' and: - def res = req.getContainerOverrides().getResourceRequirements() + def res = req.containerOverrides().resourceRequirements() res.size()==3 and: - req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - req.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '2048' - req.getContainerOverrides().getResourceRequirements().find { it.type=='GPU'}.getValue() == '2' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.VCPU}.value() == '4' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.MEMORY}.value() == '2048' + req.containerOverrides().resourceRequirements().find { it.type() == ResourceType.GPU}.value() == '2' } @@ -236,10 +240,10 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue1' 1 * handler.getJobDefinition(task) >> 'job-def:1' and: - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' - req.getTimeout() == null + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' + req.timeout() == null when: req = handler.newSubmitRequest(task) @@ -253,11 +257,11 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue2' 1 * handler.getJobDefinition(task) >> 'job-def:2' and: - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue2' - req.getJobDefinition() == 'job-def:2' + req.jobName() == 'batch-task' + req.jobQueue() == 'queue2' + req.jobDefinition() == 'job-def:2' // minimal allowed timeout is 60 seconds - req.getTimeout().getAttemptDurationSeconds() == 60 + req.timeout().attemptDurationSeconds() == 60 when: @@ -272,20 +276,20 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue3' 1 * handler.getJobDefinition(task) >> 'job-def:3' and: - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue3' - req.getJobDefinition() == 'job-def:3' + req.jobName() == 'batch-task' + req.jobQueue() == 'queue3' + req.jobDefinition() == 'job-def:3' // minimal allowed timeout is 60 seconds - req.getTimeout().getAttemptDurationSeconds() == 3600 + req.timeout().attemptDurationSeconds() == 3600 } def 'should create an aws submit request with retry'() { given: - def VAR_RETRY_MODE = new 
KeyValuePair().withName('AWS_RETRY_MODE').withValue('adaptive') - def VAR_MAX_ATTEMPTS = new KeyValuePair().withName('AWS_MAX_ATTEMPTS').withValue('10') - def VAR_METADATA_ATTEMPTS = new KeyValuePair().withName('AWS_METADATA_SERVICE_NUM_ATTEMPTS').withValue('10') + def VAR_RETRY_MODE = KeyValuePair.builder().name('AWS_RETRY_MODE').value('adaptive').build() + def VAR_MAX_ATTEMPTS = KeyValuePair.builder().name('AWS_MAX_ATTEMPTS').value('10').build() + def VAR_METADATA_ATTEMPTS = KeyValuePair.builder().name('AWS_METADATA_SERVICE_NUM_ATTEMPTS').value('10').build() def task = Mock(TaskRun) task.getName() >> 'batch-task' task.getConfig() >> new TaskConfig(memory: '8GB', cpus: 4, maxRetries: 2) @@ -303,14 +307,15 @@ 1 * handler.getJobQueue(task) >> 'queue1' 1 * handler.getJobDefinition(task) >> 'job-def:1' and: - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' // when no `retry` error strategy is defined by NF, use `maxRetries` to set Batch attempts - req.getRetryStrategy() == new RetryStrategy() - .withAttempts(3) - .withEvaluateOnExit( new EvaluateOnExit().withAction('RETRY').withOnStatusReason('Host EC2*'), new EvaluateOnExit().withOnReason('*').withAction('EXIT') ) - req.getContainerOverrides().getEnvironment() == [VAR_RETRY_MODE, VAR_MAX_ATTEMPTS, VAR_METADATA_ATTEMPTS] + req.retryStrategy() == RetryStrategy.builder() + .attempts(3) + .evaluateOnExit( EvaluateOnExit.builder().action('RETRY').onStatusReason('Host EC2*').build(), + EvaluateOnExit.builder().onReason('*').action('EXIT').build() ).build() + req.containerOverrides().environment() == [VAR_RETRY_MODE, VAR_MAX_ATTEMPTS, VAR_METADATA_ATTEMPTS] } def 'should return job queue'() { @@ -355,7 +360,7 @@ } protected KeyValuePair kv(String K, String V) { - new KeyValuePair().withName(K).withValue(V) + return KeyValuePair.builder().name(K).value(V).build() } def 'should return job envs'() { @@ -423,10 +428,7 @@ def handler = Spy(AwsBatchTaskHandler) def task = Mock(TaskRun) { getContainer()>>IMAGE } - def req = Mock(RegisterJobDefinitionRequest) { - getJobDefinitionName() >> JOB_NAME - getParameters() >> [ 'nf-token': JOB_ID ] - } + def req = RegisterJobDefinitionRequest.builder().jobDefinitionName(JOB_NAME).parameters([ 'nf-token': JOB_ID ]) when: handler.resolveJobDefinition(task) @@ -450,47 +452,40 @@ given: def JOB_NAME = 'foo-bar-1-0' def JOB_ID = '123' - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def handler = Spy(AwsBatchTaskHandler) handler.@client = client - - def req = new DescribeJobDefinitionsRequest().withJobDefinitionName(JOB_NAME) - def res = Mock(DescribeJobDefinitionsResult) - def job = Mock(JobDefinition) - + def res1 = DescribeJobDefinitionsResponse.builder().jobDefinitions([]).build() + def req = DescribeJobDefinitionsRequest.builder().jobDefinitionName(JOB_NAME).build() + def job1 = JobDefinition.builder().status('ACTIVE').parameters(['nf-token': JOB_ID]).revision(3).build() + def res2 = DescribeJobDefinitionsResponse.builder().jobDefinitions([job1]).build() + def job2 = JobDefinition.builder().status('ACTIVE').parameters([:]).revision(3).build() + def res3 = DescribeJobDefinitionsResponse.builder().jobDefinitions([job2]).build() + def job3
= JobDefinition.builder().status('INACTIVE').parameters([:]).build() + def res4 = DescribeJobDefinitionsResponse.builder().jobDefinitions([job3]).build() when: def result = handler.findJobDef(JOB_NAME, JOB_ID) then: - 1 * client.describeJobDefinitions(req) >> res - 1 * res.getJobDefinitions() >> [] + 1 * client.describeJobDefinitions(req) >> res1 result == null when: result = handler.findJobDef(JOB_NAME, JOB_ID) then: - 1 * client.describeJobDefinitions(req) >> res - 1 * res.getJobDefinitions() >> [job] - 1 * job.getStatus() >> 'ACTIVE' - 1 * job.getParameters() >> ['nf-token': JOB_ID] - 1 * job.getRevision() >> 3 + 1 * client.describeJobDefinitions(req) >> res2 result == "$JOB_NAME:3" when: result = handler.findJobDef(JOB_NAME, JOB_ID) then: - 1 * client.describeJobDefinitions(req) >> res - 1 * res.getJobDefinitions() >> [job] - 1 * job.getStatus() >> 'ACTIVE' - 1 * job.getParameters() >> [:] + 1 * client.describeJobDefinitions(req) >> res3 result == null when: + result = handler.findJobDef(JOB_NAME, JOB_ID) then: - 1 * client.describeJobDefinitions(req) >> res - 1 * res.getJobDefinitions() >> [job] - 1 * job.getStatus() >> 'INACTIVE' - 0 * job.getParameters() + 1 * client.describeJobDefinitions(req) >> res4 result == null } @@ -499,30 +494,29 @@ class AwsBatchTaskHandlerTest extends Specification { given: def JOB_NAME = 'foo-bar-1-0' - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def handler = Spy(AwsBatchTaskHandler) handler.@client = client - def req = new RegisterJobDefinitionRequest() - def res = Mock(RegisterJobDefinitionResult) + def req = RegisterJobDefinitionRequest.builder() as RegisterJobDefinitionRequest.Builder + def res = RegisterJobDefinitionResponse.builder().jobDefinitionName(JOB_NAME).revision(10).build() when: def result = handler.createJobDef(req) then: - 1 * client.registerJobDefinition(req) >> res - 1 * res.getJobDefinitionName() >> JOB_NAME - 1 * res.getRevision() >> 10 + 1 * client.registerJobDefinition(_) >> res and: result == "$JOB_NAME:10" and: - req.getTags().get('nextflow.io/version') == BuildInfo.version - Instant.parse(req.getTags().get('nextflow.io/createdAt')) + def modReq = req.build() as RegisterJobDefinitionRequest + modReq.tags().get('nextflow.io/version') == BuildInfo.version + Instant.parse(modReq.tags().get('nextflow.io/createdAt')) } def 'should add container mounts' () { given: - def container = new ContainerProperties() + def containerBuilder = ContainerProperties.builder() def handler = Spy(AwsBatchTaskHandler) def mounts = [ vol0: '/foo', @@ -532,34 +526,35 @@ class AwsBatchTaskHandlerTest extends Specification { ] when: - handler.addVolumeMountsToContainer(mounts, container) + handler.addVolumeMountsToContainer(mounts, containerBuilder) + def container = containerBuilder.build() then: - container.volumes.size() == 4 - container.mountPoints.size() == 4 - - container.volumes[0].name == 'vol0' - container.volumes[0].host.sourcePath == '/foo' - container.mountPoints[0].sourceVolume == 'vol0' - container.mountPoints[0].containerPath == '/foo' - !container.mountPoints[0].readOnly - - container.volumes[1].name == 'vol1' - container.volumes[1].host.sourcePath == '/foo' - container.mountPoints[1].sourceVolume == 'vol1' - container.mountPoints[1].containerPath == '/bar' - !container.mountPoints[1].readOnly - - container.volumes[2].name == 'vol2' - container.volumes[2].host.sourcePath == '/here' - container.mountPoints[2].sourceVolume == 'vol2' - container.mountPoints[2].containerPath == '/there' - container.mountPoints[2].readOnly - - 
container.volumes[3].name == 'vol3' - container.volumes[3].host.sourcePath == '/this' - container.mountPoints[3].sourceVolume == 'vol3' - container.mountPoints[3].containerPath == '/that' - !container.mountPoints[3].readOnly + container.volumes().size() == 4 + container.mountPoints().size() == 4 + + container.volumes()[0].name() == 'vol0' + container.volumes()[0].host().sourcePath() == '/foo' + container.mountPoints()[0].sourceVolume() == 'vol0' + container.mountPoints()[0].containerPath() == '/foo' + !container.mountPoints()[0].readOnly() + + container.volumes()[1].name() == 'vol1' + container.volumes()[1].host().sourcePath() == '/foo' + container.mountPoints()[1].sourceVolume() == 'vol1' + container.mountPoints()[1].containerPath() == '/bar' + !container.mountPoints()[1].readOnly() + + container.volumes()[2].name() == 'vol2' + container.volumes()[2].host().sourcePath() == '/here' + container.mountPoints()[2].sourceVolume() == 'vol2' + container.mountPoints()[2].containerPath() == '/there' + container.mountPoints()[2].readOnly() + + container.volumes()[3].name() == 'vol3' + container.volumes()[3].host().sourcePath() == '/this' + container.mountPoints()[3].sourceVolume() == 'vol3' + container.mountPoints()[3].containerPath() == '/that' + !container.mountPoints()[3].readOnly() } @@ -581,7 +576,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getAwsOptions() >> new AwsOptions() result.jobDefinitionName == JOB_NAME result.type == 'container' - result.parameters.'nf-token' == 'bfd3cc19ee9bdaea5b7edee94adf04bc' + result.parameters.'nf-token' == CacheHelper.hasher([JOB_NAME, result.containerProperties.build().toString()]).hash().toString() !result.containerProperties.logConfiguration !result.containerProperties.mountPoints !result.containerProperties.privileged @@ -593,7 +588,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getAwsOptions() >> new AwsOptions(awsConfig: new AwsConfig(batch: [cliPath: '/home/conda/bin/aws', logsGroup: '/aws/batch'], region: 'us-east-1')) result.jobDefinitionName == JOB_NAME result.type == 'container' - result.parameters.'nf-token' == 'af124f8899bcfc8a02037599f59a969a' + result.parameters.'nf-token' == CacheHelper.hasher([JOB_NAME, result.containerProperties.build().toString()]).hash().toString() result.containerProperties.logConfiguration.'LogDriver' == 'awslogs' result.containerProperties.logConfiguration.'Options'.'awslogs-region' == 'us-east-1' result.containerProperties.logConfiguration.'Options'.'awslogs-group' == '/aws/batch' @@ -681,7 +676,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getAwsOptions() >> new AwsOptions() result.jobDefinitionName == JOB_NAME result.type == 'container' - result.parameters.'nf-token' == '9da434654d8c698f87da973625f57489' + result.parameters.'nf-token' == CacheHelper.hasher([JOB_NAME, result.containerProperties.build().toString()]).hash().toString() result.containerProperties.privileged } @@ -768,8 +763,8 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getAwsOptions() >> opts then: - result.getContainerProperties().getUser() == 'foo' - result.getContainerProperties().getPrivileged() == true + result.containerProperties.user == 'foo' + result.containerProperties.privileged == true } @@ -777,21 +772,20 @@ class AwsBatchTaskHandlerTest extends Specification { given: def JOB_ID = 'job-2' - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def handler = Spy(AwsBatchTaskHandler) handler.@client = client - def JOB1 = new 
JobDetail().withJobId('job-1') - def JOB2 = new JobDetail().withJobId('job-2') - def JOB3 = new JobDetail().withJobId('job-3') + def JOB1 = JobDetail.builder().jobId('job-1').build() + def JOB2 = JobDetail.builder().jobId('job-2').build() + def JOB3 = JobDetail.builder().jobId('job-3').build() def JOBS = [ JOB1, JOB2, JOB3 ] - def resp = Mock(DescribeJobsResult) - resp.getJobs() >> JOBS + def resp = DescribeJobsResponse.builder().jobs(JOBS).build() when: def result = handler.describeJob(JOB_ID) then: - 1 * client.describeJobs(new DescribeJobsRequest().withJobs(JOB_ID)) >> resp + 1 * client.describeJobs(DescribeJobsRequest.builder().jobs(JOB_ID).build()) >> resp result == JOB2 } @@ -801,25 +795,24 @@ class AwsBatchTaskHandlerTest extends Specification { given: def collector = Mock(BatchContext) def JOB_ID = 'job-1' - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def handler = Spy(AwsBatchTaskHandler) handler.@client = client handler.@jobId = JOB_ID handler.batch(collector) - def JOB1 = new JobDetail().withJobId('job-1') - def JOB2 = new JobDetail().withJobId('job-2') - def JOB3 = new JobDetail().withJobId('job-3') + def JOB1 = JobDetail.builder().jobId('job-1').build() + def JOB2 = JobDetail.builder().jobId('job-2').build() + def JOB3 = JobDetail.builder().jobId('job-3').build() def JOBS = [ JOB1, JOB2, JOB3 ] - def RESP = Mock(DescribeJobsResult) - RESP.getJobs() >> JOBS + def RESP = DescribeJobsResponse.builder().jobs(JOBS).build() when: def result = handler.describeJob(JOB_ID) then: 1 * collector.contains(JOB_ID) >> false 1 * collector.getBatchFor(JOB_ID, 100) >> ['job-1','job-2','job-3'] - 1 * client.describeJobs(new DescribeJobsRequest().withJobs(['job-1','job-2','job-3'])) >> RESP + 1 * client.describeJobs(DescribeJobsRequest.builder().jobs(['job-1','job-2','job-3']).build()) >> RESP result == JOB1 } @@ -829,13 +822,13 @@ class AwsBatchTaskHandlerTest extends Specification { given: def collector = Mock(BatchContext) def JOB_ID = 'job-1' - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def handler = Spy(AwsBatchTaskHandler) handler.@client = client handler.@jobId = JOB_ID handler.batch(collector) - def JOB1 = new JobDetail().withJobId('job-1') + def JOB1 = JobDetail.builder().jobId('job-1').build() when: def result = handler.describeJob(JOB_ID) @@ -852,14 +845,14 @@ class AwsBatchTaskHandlerTest extends Specification { given: def task = Mock(TaskRun) - def client = Mock(AWSBatch) + def client = Mock(BatchClient) def proxy = Mock(AwsBatchProxy) def handler = Spy(AwsBatchTaskHandler) handler.@client = proxy handler.task = task - def req = Mock(SubmitJobRequest) - def resp = Mock(SubmitJobResult) + def req = SubmitJobRequest.builder().build() + def resp = SubmitJobResponse.builder().jobId('12345').build() when: handler.submit() @@ -867,7 +860,6 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.newSubmitRequest(task) >> req 1 * handler.bypassProxy(proxy) >> client 1 * client.submitJob(req) >> resp - 1 * resp.getJobId() >> '12345' handler.status == TaskStatus.SUBMITTED handler.jobId == '12345' @@ -999,8 +991,8 @@ class AwsBatchTaskHandlerTest extends Specification { def 'should create an aws submit request with labels'() { given: - def VAR_FOO = new KeyValuePair().withName('FOO').withValue('1') - def VAR_BAR = new KeyValuePair().withName('BAR').withValue('2') + def VAR_FOO = KeyValuePair.builder().name('FOO').value('1').build() + def VAR_BAR = KeyValuePair.builder().name('BAR').value('2').build() def task = Mock(TaskRun) task.getName() >> 
'batch-task' task.getConfig() >> new TaskConfig(memory: '8GB', cpus: 4, maxRetries: 2, errorStrategy: 'retry', resourceLabels:[a:'b']) @@ -1017,18 +1009,20 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batch-task' - req.getJobQueue() == 'queue1' - req.getJobDefinition() == 'job-def:1' - req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' - req.getContainerOverrides().getResourceRequirements().find { it.type=='MEMORY'}.getValue() == '8192' - req.getContainerOverrides().getEnvironment() == [VAR_FOO, VAR_BAR] - req.getContainerOverrides().getCommand() == ['sh', '-c','hello'] - req.getRetryStrategy() == new RetryStrategy() - .withAttempts(5) - .withEvaluateOnExit( new EvaluateOnExit().withAction('RETRY').withOnStatusReason('Host EC2*'), new EvaluateOnExit().withOnReason('*').withAction('EXIT') ) - req.getTags() == [a:'b'] - req.getPropagateTags() == true + req.jobName() == 'batch-task' + req.jobQueue() == 'queue1' + req.jobDefinition() == 'job-def:1' + req.containerOverrides().resourceRequirements().find { it.type()==ResourceType.VCPU}.value() == '4' + req.containerOverrides().resourceRequirements().find { it.type()==ResourceType.MEMORY}.value() == '8192' + req.containerOverrides().environment() == [VAR_FOO, VAR_BAR] + req.containerOverrides().command() == ['sh', '-c','hello'] + req.retryStrategy() == RetryStrategy.builder() + .attempts(5) + .evaluateOnExit( EvaluateOnExit.builder().action('RETRY').onStatusReason('Host EC2*').build(), + EvaluateOnExit.builder().onReason('*').action('EXIT').build()) + .build() + req.tags() == [a:'b'] + req.propagateTags() == true } def 'get fusion submit command' () { diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsContainerOptionsMapperTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsContainerOptionsMapperTest.groovy index 0ce1064a53..2bfef17fbb 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsContainerOptionsMapperTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsContainerOptionsMapperTest.groovy @@ -1,6 +1,8 @@ package nextflow.cloud.aws.batch import nextflow.util.CmdLineHelper +import software.amazon.awssdk.services.batch.model.Tmpfs +import software.amazon.awssdk.services.batch.model.Ulimit import spock.lang.Specification /** @@ -14,11 +16,14 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--env VAR_FOO -e VAR_FOO2=value2 --env VAR_FOO3=value3') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - def environment = properties.getEnvironment() + def environment = properties.environment() environment.size() == 3 - environment.get(0).toString() == '{Name: VAR_FOO,}' - environment.get(1).toString() == '{Name: VAR_FOO3,Value: value3}' - environment.get(2).toString() == '{Name: VAR_FOO2,Value: value2}' + environment.get(0).name() == 'VAR_FOO' + environment.get(0).value() == null + environment.get(1).name() == 'VAR_FOO3' + environment.get(1).value() == 'value3' + environment.get(2).name() == 'VAR_FOO2' + environment.get(2).value() == 'value2' } def 'should set ulimits'() { @@ -27,9 +32,9 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--ulimit nofile=1280:2560 --ulimit nproc=16:32') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - 
properties.getUlimits().size() == 2 - properties.getUlimits().get(0).toString() == '{HardLimit: 2560,Name: nofile,SoftLimit: 1280}' - properties.getUlimits().get(1).toString() == '{HardLimit: 32,Name: nproc,SoftLimit: 16}' + properties.ulimits().size() == 2 + properties.ulimits().get(0) == Ulimit.builder().hardLimit(2560).name('nofile').softLimit(1280).build() + properties.ulimits().get(1) == Ulimit.builder().hardLimit(32).name('nproc').softLimit(16).build() } @@ -39,7 +44,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--user nf-user') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getUser() == 'nf-user' + properties.user() == 'nf-user' } def 'should set privileged'() { @@ -48,7 +53,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--privileged') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getPrivileged() + properties.privileged() } def 'should set readonly'() { @@ -57,7 +62,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--read-only') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getReadonlyRootFilesystem() + properties.readonlyRootFilesystem() } def 'should set env'() { @@ -65,8 +70,8 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('-e x=y') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getEnvironment().get(0).getName()=='x' - properties.getEnvironment().get(0).getValue()=='y' + properties.environment().get(0).name()=='x' + properties.environment().get(0).value()=='y' } def 'should set tmpfs linux params'() { @@ -75,8 +80,8 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--tmpfs /run:rw,noexec,nosuid,size=64 --tmpfs /app:ro,size=128') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getTmpfs().get(0).toString() == '{ContainerPath: /run,Size: 64,MountOptions: [rw, noexec, nosuid]}' - properties.getLinuxParameters().getTmpfs().get(1).toString() == '{ContainerPath: /app,Size: 128,MountOptions: [ro]}' + properties.linuxParameters().tmpfs().get(0) == Tmpfs.builder().containerPath('/run').size(64).mountOptions(['rw', 'noexec', 'nosuid']).build() + properties.linuxParameters().tmpfs().get(1) == Tmpfs.builder().containerPath('/app').size(128).mountOptions(['ro']).build() } def 'should set memory swap '() { @@ -85,7 +90,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--memory-swap 2048') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getMaxSwap() == 2048 + properties.linuxParameters().maxSwap() == 2048 } def 'should set shared memory size'() { @@ -94,7 +99,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--shm-size 12048024') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getSharedMemorySize() == 11 + properties.linuxParameters().sharedMemorySize() == 11 } def 'should set shared memory size with unit in MiB'() { @@ -103,7 +108,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--shm-size 256m') 
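// Illustrative aside (hypothetical values, not part of the surrounding test):
// the rewritten assertions above can compare whole model objects because SDK v2
// classes such as Ulimit and Tmpfs are immutable value objects with value-based
// equals(), e.g.
//   assert Ulimit.builder().hardLimit(32).name('nproc').softLimit(16).build() ==
//          Ulimit.builder().hardLimit(32).name('nproc').softLimit(16).build()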
def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getSharedMemorySize() == 256 + properties.linuxParameters().sharedMemorySize() == 256 } def 'should set shared memory size with unit in GiB'() { @@ -112,7 +117,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--shm-size 1g') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getSharedMemorySize() == 1024 + properties.linuxParameters().sharedMemorySize() == 1024 } def 'should set memory swappiness'() { @@ -121,7 +126,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--memory-swappiness 12048024') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getSwappiness() == 12048024 + properties.linuxParameters().swappiness() == 12048024 } def 'should set init'() { @@ -130,7 +135,7 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('--init') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters().getInitProcessEnabled() + properties.linuxParameters().initProcessEnabled() } def 'should set no params'() { @@ -139,11 +144,11 @@ class AwsContainerOptionsMapperTest extends Specification { def map = CmdLineHelper.parseGnuArgs('') def properties = AwsContainerOptionsMapper.createContainerProperties(map) then: - properties.getLinuxParameters() == null - properties.getUlimits() == null - properties.getPrivileged() == null - properties.getReadonlyRootFilesystem() == null - properties.getUser() == null + properties.linuxParameters() == null + properties.ulimits() == [] + properties.privileged() == null + properties.readonlyRootFilesystem() == null + properties.user() == null } } diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsOptionsTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsOptionsTest.groovy index bc1707fd30..06b0e645aa 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsOptionsTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsOptionsTest.groovy @@ -18,7 +18,7 @@ package nextflow.cloud.aws.batch import java.nio.file.Paths -import com.amazonaws.services.s3.model.CannedAccessControlList +import software.amazon.awssdk.services.s3.model.ObjectCannedACL import nextflow.Session import nextflow.cloud.aws.config.AwsConfig import nextflow.exception.ProcessUnrecoverableException @@ -247,13 +247,13 @@ class AwsOptionsTest extends Specification { when: def opts = new AwsOptions(new Session(aws:[client:[s3Acl: 'PublicRead']])) then: - opts.getS3Acl() == CannedAccessControlList.PublicRead + opts.getS3Acl() == ObjectCannedACL.PUBLIC_READ when: opts = new AwsOptions(new Session(aws:[client:[s3Acl: 'public-read']])) then: - opts.getS3Acl() == CannedAccessControlList.PublicRead + opts.getS3Acl() == ObjectCannedACL.PUBLIC_READ when: diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3ConfigTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3ConfigTest.groovy index aef151a385..9211bc4a4b 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3ConfigTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3ConfigTest.groovy @@ -17,7 +17,7 @@ package nextflow.cloud.aws.config -import 
com.amazonaws.services.s3.model.CannedAccessControlList +import software.amazon.awssdk.services.s3.model.ObjectCannedACL import nextflow.SysEnv import spock.lang.Specification import spock.lang.Unroll @@ -61,7 +61,7 @@ class AwsS3ConfigTest extends Specification { client.storageClass == 'STANDARD' client.storageKmsKeyId == 'key-1' client.storageEncryption == 'AES256' - client.s3Acl == CannedAccessControlList.PublicRead + client.s3Acl == ObjectCannedACL.PUBLIC_READ client.pathStyleAccess client.anonymous } diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3BaseSpec.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3BaseSpec.groovy index f297488f04..3a0b4e227f 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3BaseSpec.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3BaseSpec.groovy @@ -17,16 +17,26 @@ package nextflow.cloud.aws.nio +import software.amazon.awssdk.core.sync.RequestBody +import software.amazon.awssdk.services.s3.model.GetObjectRequest +import software.amazon.awssdk.services.s3.model.HeadBucketRequest +import software.amazon.awssdk.services.s3.model.HeadObjectRequest + import java.nio.ByteBuffer import java.nio.channels.SeekableByteChannel import java.nio.file.Path import java.nio.file.Paths -import com.amazonaws.services.s3.AmazonS3 -import com.amazonaws.services.s3.model.AmazonS3Exception -import com.amazonaws.services.s3.model.ListVersionsRequest -import com.amazonaws.services.s3.model.S3ObjectSummary -import com.amazonaws.services.s3.model.S3VersionSummary +import software.amazon.awssdk.services.s3.S3Client +import software.amazon.awssdk.services.s3.model.CreateBucketRequest +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest +import software.amazon.awssdk.services.s3.model.S3Exception +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request +import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest +import software.amazon.awssdk.services.s3.model.S3Object +import software.amazon.awssdk.services.s3.model.ObjectVersion +import software.amazon.awssdk.services.s3.model.PutObjectRequest import nextflow.cloud.aws.util.S3PathFactory import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -38,14 +48,14 @@ trait AwsS3BaseSpec { static final Logger log = LoggerFactory.getLogger(AwsS3BaseSpec) - abstract AmazonS3 getS3Client() + abstract S3Client getS3Client() S3Path s3path(String path) { return (S3Path) S3PathFactory.parse(path) } String createBucket(String bucketName) { - s3Client.createBucket(bucketName) + s3Client.createBucket(CreateBucketRequest.builder().bucket(bucketName).build() as CreateBucketRequest) return bucketName } @@ -75,21 +85,21 @@ trait AwsS3BaseSpec { def (bucketName, blobName) = splitName(path) if( !blobName ) throw new IllegalArgumentException("There should be at least one dir level: $path") - return s3Client .putObject(bucketName, blobName, content) + return s3Client.putObject(PutObjectRequest.builder().bucket(bucketName).key(blobName).build() as PutObjectRequest, RequestBody.fromBytes(content.bytes)) } def createDirectory(String path) { log.debug "Creating blob directory '$path'" def (bucketName, blobName) = splitName(path) blobName += '/' - s3Client.putObject(bucketName, blobName, '') + s3Client.putObject(PutObjectRequest.builder().bucket(bucketName).key(blobName).build() as PutObjectRequest, RequestBody.empty()) } def deleteObject(String path) { log.debug "Deleting blob object 
'$path'" def (bucketName, blobName) = splitName(path) blobName += '/' - s3Client.deleteObject(bucketName, blobName) + s3Client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(blobName).build() as DeleteObjectRequest) } def deleteBucket(Path path) { @@ -109,42 +119,27 @@ trait AwsS3BaseSpec { // delete markers for all objects, but doesn't delete the object versions. // To delete objects from versioned buckets, delete all of the object versions before deleting // the bucket (see below for an example). - def objectListing = s3Client.listObjects(bucketName); - while (true) { - Iterator objIter = objectListing.getObjectSummaries().iterator(); + def objectListingIterator = s3Client.listObjectsV2Paginator(ListObjectsV2Request.builder().bucket(bucketName).build() as ListObjectsV2Request).iterator(); + while (objectListingIterator.hasNext()) { + Iterator objIter = objectListingIterator.next().contents().iterator(); while (objIter.hasNext()) { - s3Client.deleteObject(bucketName, objIter.next().getKey()); - } - - // If the bucket contains many objects, the listObjects() call - // might not return all of the objects in the first listing. Check to - // see whether the listing was truncated. If so, retrieve the next page of objects - // and delete them. - if (objectListing.isTruncated()) { - objectListing = s3Client.listNextBatchOfObjects(objectListing); - } else { - break; + s3Client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(objIter.next().key()).build() as DeleteObjectRequest); } } // Delete all object versions (required for versioned buckets). - def versionList = s3Client.listVersions(new ListVersionsRequest().withBucketName(bucketName)); - while (true) { - Iterator versionIter = versionList.getVersionSummaries().iterator(); - while (versionIter.hasNext()) { - S3VersionSummary vs = versionIter.next(); - s3Client.deleteVersion(bucketName, vs.getKey(), vs.getVersionId()); + def versionListIterator = s3Client.listObjectVersionsPaginator(ListObjectVersionsRequest.builder().bucket(bucketName).build() as ListObjectVersionsRequest).iterator(); + while ( versionListIterator.hasNext()){ + Iterator versionIter = versionListIterator.next().versions().iterator(); + while ( versionIter.hasNext() ) { + ObjectVersion vs = versionIter.next(); + s3Client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(vs.key()).versionId(vs.versionId()).build() as DeleteObjectRequest); } - if (versionList.isTruncated()) { - versionList = s3Client.listNextBatchOfVersions(versionList); - } else { - break; - } } // After all objects and object versions are deleted, delete the bucket. 
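// The paginators used above are why the v1 truncation loops could be deleted:
// listObjectsV2Paginator() and listObjectVersionsPaginator() fetch further pages
// lazily during iteration. A minimal sketch, assuming an existing S3Client `s3`
// and a bucket name `name`:
//   s3.listObjectsV2Paginator { it.bucket(name) }
//     .contents()                        // flattens all pages into one iterable
//     .each { obj -> println obj.key() }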
- s3Client.deleteBucket(bucketName); + s3Client.deleteBucket( DeleteBucketRequest.builder().bucket(bucketName).build() as DeleteBucketRequest); } @@ -170,15 +165,15 @@ trait AwsS3BaseSpec { try { if( !blobName ) { - return s3Client.doesBucketExist(path.getName(0).toString()) + return s3Client.headBucket(HeadBucketRequest.builder().bucket(bucketName).build() as HeadBucketRequest) } else { - s3Client.getObject(bucketName, blobName).getObjectMetadata() + s3Client.headObject(HeadObjectRequest.builder().bucket(bucketName).key(blobName).build() as HeadObjectRequest) return true } } - catch (AmazonS3Exception e) { - if( e.statusCode == 404 ) + catch (S3Exception e) { + if( e.statusCode() == 404 ) return false throw e } @@ -193,8 +188,7 @@ trait AwsS3BaseSpec { log.debug "Reading blob object '$path'" def (bucketName, blobName) = splitName(path) return s3Client - .getObject(bucketName, blobName) - .getObjectContent() + .getObject(GetObjectRequest.builder().bucket(bucketName).key(blobName).build() as GetObjectRequest) .getText() } diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3NioTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3NioTest.groovy index 8ef0b60b47..ff4f247820 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3NioTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/AwsS3NioTest.groovy @@ -17,6 +17,8 @@ package nextflow.cloud.aws.nio +import software.amazon.awssdk.services.s3.model.StorageClass + import java.nio.charset.Charset import java.nio.file.DirectoryNotEmptyException import java.nio.file.FileAlreadyExistsException @@ -30,8 +32,8 @@ import java.nio.file.StandardCopyOption import java.nio.file.StandardOpenOption import java.nio.file.attribute.BasicFileAttributes -import com.amazonaws.services.s3.AmazonS3 -import com.amazonaws.services.s3.model.Tag +import software.amazon.awssdk.services.s3.S3Client +import software.amazon.awssdk.services.s3.model.Tag import groovy.util.logging.Slf4j import nextflow.Global import nextflow.Session @@ -57,9 +59,9 @@ import spock.lang.Unroll class AwsS3NioTest extends Specification implements AwsS3BaseSpec { @Shared - private AmazonS3 s3Client0 + private S3Client s3Client0 - AmazonS3 getS3Client() { s3Client0 } + S3Client getS3Client() { s3Client0 } static private Map config0() { def accessKey = System.getenv('AWS_S3FS_ACCESS_KEY') @@ -1009,8 +1011,8 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { Files.exists(path) and: def tags = client .getObjectTags(path.getBucket(), path.getKey()) - tags.find { it.key=='FOO' }.value == 'Hello world' - tags.find { it.key=='BAR' }.value == 'xyz' + tags.find { it.key() =='FOO' }.value() == 'Hello world' + tags.find { it.key() =='BAR' }.value() == 'xyz' when: copy.setTags(FOO: 'Hola mundo', BAZ: '123') @@ -1019,9 +1021,9 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { Files.exists(copy) and: def copyTags = client .getObjectTags(copy.getBucket(), copy.getKey()) - copyTags.find { it.key=='FOO' }.value == 'Hola mundo' - copyTags.find { it.key=='BAZ' }.value == '123' - copyTags.find { it.key=='BAR' } == null + copyTags.find { it.key() =='FOO' }.value() == 'Hola mundo' + copyTags.find { it.key() =='BAZ' }.value() == '123' + copyTags.find { it.key() =='BAR' } == null cleanup: deleteBucket(bucketName) @@ -1104,8 +1106,8 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { client.getObjectKmsKeyId(target.bucket, "$target.key/file-1.txt") == KEY client.getObjectKmsKeyId(target.bucket, 
"$target.key/alpha/beta/file-5.txt") == KEY and: - client.getObjectTags(target.bucket, "$target.key/file-1.txt") == [ new Tag('ONE','HELLO') ] - client.getObjectTags(target.bucket, "$target.key/alpha/beta/file-5.txt") == [ new Tag('ONE','HELLO') ] + client.getObjectTags(target.bucket, "$target.key/file-1.txt") == [ Tag.builder().key('ONE').value('HELLO').build() ] + client.getObjectTags(target.bucket, "$target.key/alpha/beta/file-5.txt") == [ Tag.builder().key('ONE').value('HELLO').build() ] cleanup: target?.deleteDir() @@ -1295,7 +1297,7 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { and: client .getObjectMetadata(target1.getBucket(), target1.getKey()) - .getContentType() == 'text/foo' + .contentType() == 'text/foo' // copy a file across buckets when: @@ -1309,7 +1311,7 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { Files.exists(target2) client .getObjectMetadata(target2.getBucket(), target2.getKey()) - .getContentType() == 'text/bar' + .contentType() == 'text/bar' cleanup: deleteBucket(bucket1) @@ -1348,7 +1350,7 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { and: client .getObjectMetadata(target1.getBucket(), target1.getKey()) - .getStorageClass() == 'REDUCED_REDUNDANCY' + .storageClass() == StorageClass.REDUCED_REDUNDANCY // copy a file across buckets when: @@ -1362,7 +1364,7 @@ class AwsS3NioTest extends Specification implements AwsS3BaseSpec { Files.exists(target2) client .getObjectMetadata(target2.getBucket(), target2.getKey()) - .getStorageClass() == 'STANDARD_IA' + .storageClass() == StorageClass.STANDARD_IA cleanup: deleteBucket(bucket1) diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy index be9ded0b01..dc2b91ce65 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy @@ -17,9 +17,9 @@ package nextflow.cloud.aws.nio -import nextflow.cloud.aws.config.AwsConfig +import software.amazon.awssdk.services.s3.model.ObjectCannedACL +import software.amazon.awssdk.services.s3.model.ServerSideEncryption import spock.lang.Specification -import spock.lang.Unroll /** * @@ -27,21 +27,48 @@ import spock.lang.Unroll */ class S3FileSystemProviderTest extends Specification { - @Unroll - def 'should get global region' () { + def 'should create filesystem from config'(){ given: - def provider = Spy(S3FileSystemProvider) - - expect: - provider.globalRegion(new AwsConfig(CONFIG)) == EXPECTED - - where: - EXPECTED | CONFIG - 'us-east-1' | [:] - 'us-east-1' | [region:'foo'] - 'us-east-1' | [region:'foo', client:[endpoint: 'http://s3.us-east-2.amazonaws.com']] - 'foo' | [region:'foo', client:[endpoint: 'http://bar.com']] - + def config = [client: [ anonymous: true, s3acl: 'Private', connectionTimeout: 20000, endpoint: 'https://s3.eu-west-1.amazonaws.com', + maxConnections: 100, maxErrorRetry: 3, socketTimeout: 20000, requesterPays: true, s3PathStyleAccess: true, + proxyHost: 'host.com', proxyPort: 80, proxyScheme: 'https', proxyUsername: 'user', proxyPassword: 'pass', + signerOverride: 'S3SignerType', userAgent: 'Agent1', storageEncryption: 'AES256', storageKmsKeyId: 'arn:key:id', + uploadMaxThreads: 20, uploadChunkSize: '7MB', uploadMaxAttempts: 4, uploadRetrySleep: '200ms' + ], + accessKey: '123456abc', secretKey: '78910def', profile: 'test'] + def provider = new 
S3FileSystemProvider(); + when: + def fs = provider.newFileSystem(new URI("s3:///bucket/key"), config) as S3FileSystem + then: + fs.getBucketName() == 'bucket' + def client = fs.getClient() + client.client != null + client.uploadMaxThreads == 20 + client.uploadChunkSize == 7340032 + client.cannedAcl == ObjectCannedACL.PRIVATE + client.storageEncryption == ServerSideEncryption.AES256 + client.isRequesterPaysEnabled == true + client.kmsKeyId == 'arn:key:id' + client.factory.accessKey() == '123456abc' + client.factory.secretKey() == '78910def' + client.factory.profile() == 'test' + client.factory.config.s3Config.anonymous == true + client.factory.config.s3Config.endpoint == 'https://s3.eu-west-1.amazonaws.com' + client.factory.config.s3Config.pathStyleAccess == true + fs.properties().getProperty('proxy_host') == 'host.com' + fs.properties().getProperty('proxy_port') == '80' + fs.properties().getProperty('proxy_scheme') == 'https' + fs.properties().getProperty('proxy_username') == 'user' + fs.properties().getProperty('proxy_password') == 'pass' + fs.properties().getProperty('signer_override') == 'S3SignerType' + fs.properties().getProperty('user_agent') == 'Agent1' + fs.properties().getProperty('socket_timeout') == '20000' + fs.properties().getProperty('connection_timeout') == '20000' + fs.properties().getProperty('max_connections') == '100' + fs.properties().getProperty('max_error_retry') == '3' + fs.properties().getProperty('upload_max_attempts') == '4' + fs.properties().getProperty('upload_retry_sleep') == '200' } + } diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/ng/S3ParallelDownloadTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/ng/S3ParallelDownloadTest.groovy deleted file mode 100644 index e9c904db19..0000000000 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/ng/S3ParallelDownloadTest.groovy +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright 2020-2022, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package nextflow.cloud.aws.nio.ng - - -import java.nio.file.Files -import java.nio.file.Paths -import java.time.temporal.ChronoUnit - -import com.amazonaws.services.s3.AmazonS3 -import com.amazonaws.services.s3.model.ObjectMetadata -import dev.failsafe.Failsafe -import dev.failsafe.RetryPolicy -import dev.failsafe.function.ContextualSupplier -import groovy.util.logging.Slf4j -import nextflow.Global -import nextflow.Session -import nextflow.cloud.aws.nio.S3FileSystem -import nextflow.file.FileHelper -import spock.lang.Ignore -import spock.lang.IgnoreIf -import spock.lang.Requires -import spock.lang.Shared -import spock.lang.Specification -/** - * - * @author Paolo Di Tommaso - */ -@IgnoreIf({System.getenv('NXF_SMOKE')}) -@Requires({System.getenv('AWS_S3FS_ACCESS_KEY') && System.getenv('AWS_S3FS_SECRET_KEY')}) -@Slf4j -class S3ParallelDownloadTest extends Specification { - - @Shared - static AmazonS3 s3Client0 - - AmazonS3 getS3Client() { s3Client0 } - - static { - def fs = (S3FileSystem) FileHelper.getOrCreateFileSystemFor(URI.create("s3:///"), config0()) - s3Client0 = fs.client.getClient() - } - - static private Map config0() { - def accessKey = System.getenv('AWS_S3FS_ACCESS_KEY') - def secretKey = System.getenv('AWS_S3FS_SECRET_KEY') - return [aws: [access_key: accessKey, secret_key: secretKey]] - } - - def setup() { - def cfg = config0() - Global.config = cfg - Global.session = Mock(Session) { getConfig()>>cfg } - } - - @Ignore - def 'should download small file' () { - given: - def downloader = new S3ParallelDownload(s3Client) - - when: - def stream = downloader.download('nextflow-ci','hello.txt') - then: - stream.text == 'Hello world\n' - } - - @Ignore - def 'should download 100 mb file' () { - given: - def downloader = new S3ParallelDownload(s3Client) - def target = Paths.get('file-100MB.data-copy.data') - and: - Files.deleteIfExists(target) - when: - def stream = downloader.download('nextflow-ci','file-100MB.data') - then: - Files.copy(stream, target) - - cleanup: - stream?.close() - } - - @Ignore - def 'should download 10 gb file' () { - given: - def downloader = new S3ParallelDownload(s3Client) - def target = Paths.get('real.fastq.gz') - - when: - Files.deleteIfExists(target) - and: - def stream = downloader.download('nextflow-ci','petagene/example_data/real.fastq.gz') - then: - Files.copy(stream, target) - - cleanup: - stream?.close() - } - - def 'should create part single' () { - given: - def FILE_LEN = 1 - def CHUNK_SIZE = 1000 - and: - def client = Mock(AmazonS3) - def download = new S3ParallelDownload(client, new DownloadOpts(download_chunk_size: String.valueOf(CHUNK_SIZE))) - def META = Mock(ObjectMetadata) {getContentLength() >> FILE_LEN } - - when: - def result = download.prepareGetPartRequests('foo','bar').iterator() - then: - 1 * client.getObjectMetadata('foo','bar') >> META - and: - with(result.next()) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [0,0] - } - and: - !result.hasNext() - } - - - def 'should create part requests' () { - given: - def FILE_LEN = 3_000 - def CHUNK_SIZE = 1000 - and: - def client = Mock(AmazonS3) - def download = new S3ParallelDownload(client, new DownloadOpts(download_chunk_size: String.valueOf(CHUNK_SIZE))) - def META = Mock(ObjectMetadata) {getContentLength() >> FILE_LEN } - - when: - def result = download.prepareGetPartRequests('foo','bar').iterator() - then: - 1 * client.getObjectMetadata('foo','bar') >> META - and: - with(result.next()) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [0,999] 
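// For reference, the [start,end] ranges asserted above correspond, in the v2 SDK,
// to a Range header set on the request builder (hypothetical bucket/key):
//   GetObjectRequest.builder().bucket('foo').key('bar').range('bytes=0-999').build()
// Multi-part parallel transfers are delegated to the SDK's S3 Transfer Manager in
// v2, which is presumably why this hand-rolled downloader and its tests were removed.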
- } - and: - with(result.next()) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [1000,1999] - } - and: - with(result.next()) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [2000,2999] - } - and: - !result.hasNext() - } - - def 'should create long requests' () { - given: - def FILE_LEN = 6_000_000_000 - def CHUNK_SIZE = 2_000_000_000 - and: - def client = Mock(AmazonS3) - def download = new S3ParallelDownload(client, new DownloadOpts(download_chunk_size: String.valueOf(CHUNK_SIZE), download_buffer_max_size: String.valueOf(CHUNK_SIZE))) - def META = Mock(ObjectMetadata) {getContentLength() >> FILE_LEN } - - when: - def result = download.prepareGetPartRequests('foo','bar') - then: - 1 * client.getObjectMetadata('foo','bar') >> META - and: - with(result[0]) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [0,1_999_999_999] - } - and: - with(result[1]) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [2_000_000_000,3_999_999_999] - } - and: - with(result[2]) { - getBucketName() == 'foo' - getKey() == 'bar' - getRange() == [4_000_000_000,5_999_999_999] - } - and: - result.size()==3 - } - - @Ignore - def 'test failsafe' () { - given: - RetryPolicy retryPolicy = RetryPolicy.builder() - .handle(RuntimeException.class) -// .withDelay(Duration.ofSeconds(1)) -// .withMaxDuration(Duration.of(60, ChronoUnit.SECONDS)) - .withBackoff(1, 30, ChronoUnit.SECONDS) - .withMaxRetries(10) - .onFailedAttempt(e -> log.error("Connection attempt failed - cause: ${e.getLastFailure()}")) - .onRetry(e -> log.warn("Failure #{}. Retrying.", e.getAttemptCount())) - .build(); - - when: - def work = { dev.failsafe.ExecutionContext it -> - log.debug "try num ${it.getAttemptCount()}" - throw new RuntimeException("Break ${it.getAttemptCount()}") - } as ContextualSupplier - def result = Failsafe.with(retryPolicy).get( work ) - then: - result == 'Hello' - } -} diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/util/S3ClientConfigurationTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/util/S3ClientConfigurationTest.groovy new file mode 100644 index 0000000000..bf4179de54 --- /dev/null +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/util/S3ClientConfigurationTest.groovy @@ -0,0 +1,79 @@ +/* + * Copyright 2013-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+package nextflow.cloud.aws.nio.util
+
+import nextflow.cloud.aws.config.AwsConfig
+import software.amazon.awssdk.auth.signer.AwsS3V4Signer
+import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption
+import software.amazon.awssdk.http.SdkHttpConfigurationOption
+import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient
+import software.amazon.awssdk.http.crt.internal.AwsCrtClientBuilderBase
+import spock.lang.Specification
+
+class S3ClientConfigurationTest extends Specification {
+    def 'create S3 synchronous client configuration' () {
+        given:
+        def props = new Properties()
+        def config = new AwsConfig([client: [connectionTimeout: 20000, maxConnections: 100, maxErrorRetry: 3, socketTimeout: 20000,
+                proxyHost: 'host.com', proxyPort: 80, proxyScheme: 'https', proxyUsername: 'user', proxyPassword: 'pass',
+                signerOverride: 'S3SignerType', userAgent: 'Agent1' ]])
+        props.putAll(config.getS3LegacyProperties())
+        when:
+        def clientConfig = S3SyncClientConfiguration.create(props)
+        then:
+        def overrideConfig = clientConfig.getClientOverrideConfiguration()
+        overrideConfig.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).get() == 'Agent1'
+        overrideConfig.advancedOption(SdkAdvancedClientOption.SIGNER).get() instanceof AwsS3V4Signer
+        overrideConfig.retryStrategy().get().maxAttempts() == 4
+        def httpClientBuilder = clientConfig.getHttpClientBuilder()
+        httpClientBuilder.proxyConfiguration.host() == 'host.com'
+        httpClientBuilder.proxyConfiguration.port() == 80
+        httpClientBuilder.proxyConfiguration.scheme() == 'https'
+        httpClientBuilder.proxyConfiguration.username() == 'user'
+        httpClientBuilder.proxyConfiguration.password() == 'pass'
+        httpClientBuilder.standardOptions.get(SdkHttpConfigurationOption.CONNECTION_TIMEOUT).toMillis() == 20000
+        httpClientBuilder.standardOptions.get(SdkHttpConfigurationOption.READ_TIMEOUT).toMillis() == 20000 // socket timeout
+        httpClientBuilder.standardOptions.get(SdkHttpConfigurationOption.MAX_CONNECTIONS) == 100
+    }
+
+    def 'create S3 asynchronous client configuration' () {
+        given:
+        def props = new Properties()
+        def config = new AwsConfig([client: [connectionTimeout: 20000, maxConnections: 100, maxErrorRetry: 3, socketTimeout: 20000,
+                proxyHost: 'host.com', proxyPort: 80, proxyScheme: 'https', proxyUsername: 'user', proxyPassword: 'pass',
+                signerOverride: 'S3SignerType', userAgent: 'Agent1' ]])
+        props.putAll(config.getS3LegacyProperties())
+        when:
+        def clientConfig = S3AsyncClientConfiguration.create(props)
+        then:
+        def overrideConfig = clientConfig.getClientOverrideConfiguration()
+        overrideConfig.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).get() == 'Agent1'
+        overrideConfig.advancedOption(SdkAdvancedClientOption.SIGNER).get() instanceof AwsS3V4Signer
+        overrideConfig.retryStrategy().get().maxAttempts() == 4
+        def httpClientBuilder = clientConfig.getHttpClientBuilder() as AwsCrtClientBuilderBase
+        httpClientBuilder.proxyConfiguration.host() == 'host.com'
+        httpClientBuilder.proxyConfiguration.port() == 80
+        httpClientBuilder.proxyConfiguration.scheme() == 'https'
+        httpClientBuilder.proxyConfiguration.username() == 'user'
+        httpClientBuilder.proxyConfiguration.password() == 'pass'
+        httpClientBuilder.getAttributeMap().get(SdkHttpConfigurationOption.CONNECTION_TIMEOUT).toMillis() == 20000
+        httpClientBuilder.getAttributeMap().get(SdkHttpConfigurationOption.READ_TIMEOUT) == null // socket timeout not supported in async client
+        httpClientBuilder.getAttributeMap().get(SdkHttpConfigurationOption.MAX_CONNECTIONS) == 100
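+        // note: unlike the Apache-based sync builder above, which exposes its resolved
+        // settings via standardOptions, the CRT-based async builder surfaces them through
+        // an attribute map; in both cases maxErrorRetry: 3 shows up as maxAttempts() == 4,
+        // since the retry strategy counts the initial attempt plus the retries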
+    }
+}
diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/util/AwsHelperTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/util/AwsHelperTest.groovy
index fba4f9149c..d7b03f7720 100644
--- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/util/AwsHelperTest.groovy
+++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/util/AwsHelperTest.groovy
@@ -7,7 +7,7 @@
 package nextflow.cloud.aws.util
 
-import com.amazonaws.services.s3.model.CannedAccessControlList
+import software.amazon.awssdk.services.s3.model.ObjectCannedACL
 import spock.lang.Specification
 /**
  *
@@ -17,9 +17,10 @@ class AwsHelperTest extends Specification {
 
     def 'should parse S3 acl' () {
         expect:
-        AwsHelper.parseS3Acl('PublicRead') == CannedAccessControlList.PublicRead
-        AwsHelper.parseS3Acl('public-read') == CannedAccessControlList.PublicRead
-
+        AwsHelper.parseS3Acl('PublicRead') == ObjectCannedACL.PUBLIC_READ
+        AwsHelper.parseS3Acl('public-read') == ObjectCannedACL.PUBLIC_READ
+        AwsHelper.parseS3Acl('Private') == ObjectCannedACL.PRIVATE
+        AwsHelper.parseS3Acl('private') == ObjectCannedACL.PRIVATE
         when:
         AwsHelper.parseS3Acl('unknown')
         then:
diff --git a/plugins/nf-amazon/src/test/nextflow/processor/PublishDirS3Test.groovy b/plugins/nf-amazon/src/test/nextflow/processor/PublishDirS3Test.groovy
index adeb76cd43..d88b96e4c2 100644
--- a/plugins/nf-amazon/src/test/nextflow/processor/PublishDirS3Test.groovy
+++ b/plugins/nf-amazon/src/test/nextflow/processor/PublishDirS3Test.groovy
@@ -65,8 +65,8 @@ class PublishDirS3Test extends Specification {
         then:
         1 * spy.safeProcessFile(source, _) >> { sourceFile, s3File ->
             assert s3File instanceof S3Path
-            assert (s3File as S3Path).getTagsList().find{ it.getKey()=='FOO'}.value == 'this'
-            assert (s3File as S3Path).getTagsList().find{ it.getKey()=='BAR'}.value == 'that'
+            assert (s3File as S3Path).getTagsList().find { it.key() == 'FOO' }.value() == 'this'
+            assert (s3File as S3Path).getTagsList().find { it.key() == 'BAR' }.value() == 'that'
         }
 
         cleanup: