From 8bc7479f5f27906a93ac26022c45e1e911635f78 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Wed, 11 Dec 2024 11:35:07 +0300 Subject: [PATCH 1/8] HDDS-11699. Remove unnecessary information about parts when downloading multipart files. --- .../hadoop/ozone/client/rpc/RpcClient.java | 34 +++++++++---- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 27 ++++++++-- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 49 +++++++++++++++---- .../src/main/proto/OmClientProtocol.proto | 2 + .../hadoop/ozone/om/KeyManagerImpl.java | 23 ++++++++- .../OzoneManagerRequestHandler.java | 1 + 6 files changed, 113 insertions(+), 23 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 93c675d9b90..4d7c7118977 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1765,16 +1765,7 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName) @Override public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, int partNumber) throws IOException { - OmKeyInfo keyInfo = getS3KeyInfo(bucketName, keyName, false); - List filteredKeyLocationInfo = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() - .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == - partNumber) - .collect(Collectors.toList()); - keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false); - keyInfo.setDataSize(filteredKeyLocationInfo.stream() - .mapToLong(OmKeyLocationInfo::getLength) - .sum()); + OmKeyInfo keyInfo = getS3PartKeyInfo(bucketName, keyName, partNumber); return getOzoneKeyDetails(keyInfo); } @@ -1801,6 +1792,29 @@ private OmKeyInfo getS3KeyInfo( return keyInfoWithS3Context.getKeyInfo(); } + @Nonnull + private OmKeyInfo getS3PartKeyInfo( + String bucketName, 
String keyName, int partNumber) throws IOException { + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + // Volume name is not important, as we call GetKeyInfo with + // assumeS3Context = true, OM will infer the correct s3 volume. + .setVolumeName(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT) + .setBucketName(bucketName) + .setKeyName(keyName) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) + .setLatestVersionLocation(getLatestVersionLocation) + .setForceUpdateContainerCacheFromSCM(false) + .setPartNumber(partNumber) + .build(); + KeyInfoWithVolumeContext keyInfoWithS3Context = + ozoneManagerClient.getKeyInfo(keyArgs, true); + keyInfoWithS3Context.getUserPrincipal().ifPresent(this::updateS3Principal); + return keyInfoWithS3Context.getKeyInfo(); + } + private OmKeyInfo getKeyInfo( String volumeName, String bucketName, String keyName, boolean forceUpdateContainerCache) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index ba28b45a0e5..b935190a5b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -54,6 +54,7 @@ public final class OmKeyArgs implements Auditable { private final boolean headOp; private final boolean forceUpdateContainerCacheFromSCM; private final Map tags; + private final Integer partNumber; // expectedDataGeneration, when used in key creation indicates that a // key with the same keyName should exist with the given generation. 
// For a key commit to succeed, the original key should still be present with the @@ -82,6 +83,7 @@ private OmKeyArgs(Builder b) { this.ownerName = b.ownerName; this.tags = b.tags; this.expectedDataGeneration = b.expectedDataGeneration; + this.partNumber = b.partNumber; } public boolean getIsMultipartKey() { @@ -168,6 +170,10 @@ public Long getExpectedDataGeneration() { return expectedDataGeneration; } + public Integer getPartNumber() { + return partNumber; + } + @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); @@ -213,6 +219,11 @@ public OmKeyArgs.Builder toBuilder() { if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } + + if (partNumber != null) { + builder.setPartNumber(partNumber); + } + return builder; } @@ -227,7 +238,11 @@ public KeyArgs toProtobuf() { .setLatestVersionLocation(getLatestVersionLocation()) .setHeadOp(isHeadOp()) .setForceUpdateContainerCacheFromSCM( - isForceUpdateContainerCacheFromSCM()); + isForceUpdateContainerCacheFromSCM() + ); + if (partNumber != null) { + builder.setPartNumber(partNumber); + } if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } @@ -257,6 +272,7 @@ public static class Builder { private boolean forceUpdateContainerCacheFromSCM; private final Map tags = new HashMap<>(); private Long expectedDataGeneration = null; + private Integer partNumber; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -308,8 +324,8 @@ public Builder setMultipartUploadID(String uploadID) { return this; } - public Builder setMultipartUploadPartNumber(int partNumber) { - this.multipartUploadPartNumber = partNumber; + public Builder setMultipartUploadPartNumber(int multipartUploadPartNumber) { + this.multipartUploadPartNumber = multipartUploadPartNumber; return this; } @@ -371,6 +387,11 @@ public Builder setExpectedDataGeneration(long generation) { return this; } + public Builder setPartNumber(int partNumber) 
{ + this.partNumber = partNumber; + return this; + } + public OmKeyArgs build() { return new OmKeyArgs(this); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 5c480860d2b..404feb1c8ee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -17,14 +17,6 @@ */ package org.apache.hadoop.ozone.om.helpers; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; - import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; @@ -33,6 +25,7 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -45,10 +38,18 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.util.Time; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; + /** * Args for key block. The block instance for the key requested in putKey. 
* This is returned from OM to client, and client use class to talk to @@ -80,6 +81,8 @@ public static Codec getCodec(boolean ignorePipeline) { private String keyName; private long dataSize; private List keyLocationVersions; + private Map> currentlocationsPartsMap; + private Map currentDataSizePartsMap; private final long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; @@ -188,6 +191,14 @@ public String getOwnerName() { return ownerName; } + public Map> getCurrentlocationsPartsMap() { + return currentlocationsPartsMap; + } + + public Map getCurrentDataSizePartsMap() { + return currentDataSizePartsMap; + } + /** * Returns the generation of the object. Note this is currently the same as updateID for a key. * @return long @@ -210,6 +221,25 @@ public void setKeyLocationVersions( this.keyLocationVersions = keyLocationVersions; } + private void refreshCurrentLocationPartsMap() { + if (this.keyLocationVersions.size() > 0) { + this.currentlocationsPartsMap = this.keyLocationVersions.get(keyLocationVersions.size() - 1) + .getLocationList() + .stream() + .filter(it -> it.getPartNumber() != 0) + .collect(Collectors.groupingBy(OmKeyLocationInfo::getPartNumber)); + this.currentDataSizePartsMap = this.currentlocationsPartsMap.entrySet().stream() + .map(it -> Pair.of( + it.getKey(), + it.getValue().stream(). 
+ mapToLong(BlockLocationInfo::getLength) + .sum() + ) + ) + .collect(Collectors.toMap(Pair::getKey, Pair::getValue)); + } + } + public void setFile(boolean file) { isFile = file; } @@ -351,6 +381,7 @@ public synchronized void appendNewBlocks( if (updateTime) { setModificationTime(Time.now()); } + refreshCurrentLocationPartsMap(); } /** diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 92c2b6b4cc5..0b2d74d6b68 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1079,6 +1079,8 @@ message KeyArgs { // This allows a key to be created an committed atomically if the original has not // been modified. optional uint64 expectedDataGeneration = 23; + + optional int32 partNumber = 24; } message KeyLocation { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index ccda21efc93..bac6851c171 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -509,6 +509,27 @@ private OmKeyInfo readKeyInfo(OmKeyArgs args, BucketLayout bucketLayout) if (args.getLatestVersionLocation()) { slimLocationVersion(value); } + Integer partNumberParam = args.getPartNumber(); + if (partNumberParam != null && partNumberParam > 0) { + OmKeyLocationInfoGroup latestLocationVersion = value.getLatestVersionLocations(); + if (latestLocationVersion != null && latestLocationVersion.isMultipartKey()) { + + value.setKeyLocationVersions( + Collections.singletonList( + new OmKeyLocationInfoGroup( + latestLocationVersion.getVersion(), + value.getCurrentlocationsPartsMap() + .getOrDefault(partNumberParam, 
Collections.emptyList()), + true + ) + ) + ); + value.setDataSize( + value.getCurrentDataSizePartsMap() + .getOrDefault(partNumberParam, 0L) + ); + } + } return value; } @@ -801,7 +822,7 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index ab1f68d9928..87de168b30e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -659,6 +659,7 @@ private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request, .setHeadOp(keyArgs.getHeadOp()) .setForceUpdateContainerCacheFromSCM( keyArgs.getForceUpdateContainerCacheFromSCM()) + .setPartNumber(keyArgs.getPartNumber()) .build(); KeyInfoWithVolumeContext keyInfo = impl.getKeyInfo(omKeyArgs, request.getAssumeS3Context()); From e69687847889b3778b07d78e383c7f1f0417b063 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Wed, 11 Dec 2024 17:54:41 +0300 Subject: [PATCH 2/8] HDDS-11699. Try to fix failed tests. 
--- .../java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 404feb1c8ee..cc7d9a42ca5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -192,6 +192,9 @@ public String getOwnerName() { } public Map> getCurrentlocationsPartsMap() { + if (currentDataSizePartsMap == null) { + refreshCurrentLocationPartsMap(); + } return currentlocationsPartsMap; } From fbfa61f8b423c696f58c8e205a1515df18507c13 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 12 Dec 2024 15:14:13 +0300 Subject: [PATCH 3/8] HDDS-11699. Fix review notices. --- .../hadoop/ozone/client/rpc/RpcClient.java | 2 +- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 20 +------ .../hadoop/ozone/om/helpers/OmKeyInfo.java | 52 ++++--------------- .../src/main/proto/OmClientProtocol.proto | 2 - .../hadoop/ozone/om/KeyManagerImpl.java | 24 +++++---- .../OzoneManagerRequestHandler.java | 2 +- 6 files changed, 28 insertions(+), 74 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 4d7c7118977..6e7892a7e46 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1807,7 +1807,7 @@ private OmKeyInfo getS3PartKeyInfo( .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .setForceUpdateContainerCacheFromSCM(false) - .setPartNumber(partNumber) + .setMultipartUploadPartNumber(partNumber) .build(); KeyInfoWithVolumeContext 
keyInfoWithS3Context = ozoneManagerClient.getKeyInfo(keyArgs, true); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index b935190a5b2..106ef6a06ab 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -54,7 +54,6 @@ public final class OmKeyArgs implements Auditable { private final boolean headOp; private final boolean forceUpdateContainerCacheFromSCM; private final Map tags; - private final Integer partNumber; // expectedDataGeneration, when used in key creation indicates that a // key with the same keyName should exist with the given generation. // For a key commit to succeed, the original key should still be present with the @@ -83,7 +82,6 @@ private OmKeyArgs(Builder b) { this.ownerName = b.ownerName; this.tags = b.tags; this.expectedDataGeneration = b.expectedDataGeneration; - this.partNumber = b.partNumber; } public boolean getIsMultipartKey() { @@ -170,10 +168,6 @@ public Long getExpectedDataGeneration() { return expectedDataGeneration; } - public Integer getPartNumber() { - return partNumber; - } - @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); @@ -220,10 +214,6 @@ public OmKeyArgs.Builder toBuilder() { builder.setExpectedDataGeneration(expectedDataGeneration); } - if (partNumber != null) { - builder.setPartNumber(partNumber); - } - return builder; } @@ -240,8 +230,8 @@ public KeyArgs toProtobuf() { .setForceUpdateContainerCacheFromSCM( isForceUpdateContainerCacheFromSCM() ); - if (partNumber != null) { - builder.setPartNumber(partNumber); + if (multipartUploadPartNumber != 0) { + builder.setMultipartNumber(multipartUploadPartNumber); } if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); @@ -272,7 +262,6 @@ public static 
class Builder { private boolean forceUpdateContainerCacheFromSCM; private final Map tags = new HashMap<>(); private Long expectedDataGeneration = null; - private Integer partNumber; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -387,11 +376,6 @@ public Builder setExpectedDataGeneration(long generation) { return this; } - public Builder setPartNumber(int partNumber) { - this.partNumber = partNumber; - return this; - } - public OmKeyArgs build() { return new OmKeyArgs(this); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index cc7d9a42ca5..5c480860d2b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -17,6 +17,14 @@ */ package org.apache.hadoop.ozone.om.helpers; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; + import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; @@ -25,7 +33,6 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -38,18 +45,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.util.Time; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.stream.Collectors; - /** * Args for key block. The block instance for the key requested in putKey. * This is returned from OM to client, and client use class to talk to @@ -81,8 +80,6 @@ public static Codec getCodec(boolean ignorePipeline) { private String keyName; private long dataSize; private List keyLocationVersions; - private Map> currentlocationsPartsMap; - private Map currentDataSizePartsMap; private final long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; @@ -191,17 +188,6 @@ public String getOwnerName() { return ownerName; } - public Map> getCurrentlocationsPartsMap() { - if (currentDataSizePartsMap == null) { - refreshCurrentLocationPartsMap(); - } - return currentlocationsPartsMap; - } - - public Map getCurrentDataSizePartsMap() { - return currentDataSizePartsMap; - } - /** * Returns the generation of the object. Note this is currently the same as updateID for a key. * @return long @@ -224,25 +210,6 @@ public void setKeyLocationVersions( this.keyLocationVersions = keyLocationVersions; } - private void refreshCurrentLocationPartsMap() { - if (this.keyLocationVersions.size() > 0) { - this.currentlocationsPartsMap = this.keyLocationVersions.get(keyLocationVersions.size() - 1) - .getLocationList() - .stream() - .filter(it -> it.getPartNumber() != 0) - .collect(Collectors.groupingBy(OmKeyLocationInfo::getPartNumber)); - this.currentDataSizePartsMap = this.currentlocationsPartsMap.entrySet().stream() - .map(it -> Pair.of( - it.getKey(), - it.getValue().stream(). 
- mapToLong(BlockLocationInfo::getLength) - .sum() - ) - ) - .collect(Collectors.toMap(Pair::getKey, Pair::getValue)); - } - } - public void setFile(boolean file) { isFile = file; } @@ -384,7 +351,6 @@ public synchronized void appendNewBlocks( if (updateTime) { setModificationTime(Time.now()); } - refreshCurrentLocationPartsMap(); } /** diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 0b2d74d6b68..92c2b6b4cc5 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1079,8 +1079,6 @@ message KeyArgs { // This allows a key to be created an committed atomically if the original has not // been modified. optional uint64 expectedDataGeneration = 23; - - optional int32 partNumber = 24; } message KeyLocation { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index bac6851c171..c1b98e81f72 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -509,25 +509,31 @@ private OmKeyInfo readKeyInfo(OmKeyArgs args, BucketLayout bucketLayout) if (args.getLatestVersionLocation()) { slimLocationVersion(value); } - Integer partNumberParam = args.getPartNumber(); - if (partNumberParam != null && partNumberParam > 0) { + int partNumberParam = args.getMultipartUploadPartNumber(); + if (partNumberParam > 0) { OmKeyLocationInfoGroup latestLocationVersion = value.getLatestVersionLocations(); if (latestLocationVersion != null && latestLocationVersion.isMultipartKey()) { + List currentLocations = + value.getKeyLocationVersions().get(value.getKeyLocationVersions().size() - 1) + .getLocationList() + .stream() 
+ .filter(it -> it.getPartNumber() == partNumberParam) + .collect(Collectors.toList()); value.setKeyLocationVersions( Collections.singletonList( new OmKeyLocationInfoGroup( latestLocationVersion.getVersion(), - value.getCurrentlocationsPartsMap() - .getOrDefault(partNumberParam, Collections.emptyList()), + currentLocations, true ) ) ); - value.setDataSize( - value.getCurrentDataSizePartsMap() - .getOrDefault(partNumberParam, 0L) - ); + + long dataLength = currentLocations.stream() + .mapToLong(BlockLocationInfo::getLength) + .sum(); + value.setDataSize(dataLength); } } return value; @@ -822,7 +828,7 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 87de168b30e..3c99e26eac2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -659,7 +659,7 @@ private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request, .setHeadOp(keyArgs.getHeadOp()) .setForceUpdateContainerCacheFromSCM( keyArgs.getForceUpdateContainerCacheFromSCM()) - .setPartNumber(keyArgs.getPartNumber()) + .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) .build(); KeyInfoWithVolumeContext keyInfo = impl.getKeyInfo(omKeyArgs, request.getAssumeS3Context()); From ce799535fbbb0812f83c7af142f642357abe1e28 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Tue, 24 Dec 2024 16:09:53 +0300 Subject: [PATCH 4/8] HDDS-11699. 
Wrote unit and integration tests --- ...TestOzoneClientMultipartUploadWithFSO.java | 155 +++++++++++++++--- .../hadoop/ozone/om/TestKeyManagerImpl.java | 102 +++++++++++- .../s3/awssdk/v1/AbstractS3SDKV1Tests.java | 60 +++++++ 3 files changed, 291 insertions(+), 26 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index b943930f62f..f2e9e8777a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -40,6 +40,8 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneKeyLocation; import org.apache.hadoop.ozone.client.OzoneMultipartUpload; import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -179,12 +181,12 @@ public void preTest() throws Exception { @Test public void testInitiateMultipartUploadWithReplicationInformationSet() throws IOException { - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); // Call initiate multipart upload for the same key again, this should // generate a new uploadID. 
- String uploadIDNew = initiateMultipartUpload(bucket, keyName, + String uploadIDNew = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); assertNotEquals(uploadIDNew, uploadID); } @@ -216,7 +218,7 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws @Test public void testUploadPartWithNoOverride() throws IOException { String sampleData = "sample Value"; - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, @@ -235,7 +237,7 @@ public void testUploadPartWithNoOverride() throws IOException { @Test public void testUploadPartOverrideWithRatis() throws Exception { String sampleData = "sample Value"; - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, THREE); int partNumber = 1; @@ -348,7 +350,7 @@ private OzoneBucket getOzoneECBucket(String myBucket) @Test public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // Upload Parts @@ -371,7 +373,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { public void testMultipartUploadWithDiscardedUnusedPartSize() throws Exception { // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); byte[] data = generateData(10000000, (byte) 97); // Upload Parts @@ -402,7 +404,7 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() @Test public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() throws Exception { - String uploadID = 
initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // We have not uploaded any parts, but passing some list it should throw @@ -417,7 +419,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() @Test public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -432,7 +434,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() @Test public void testMultipartUploadWithMissingParts() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -447,7 +449,7 @@ public void testMultipartUploadWithMissingParts() throws Exception { @Test public void testMultipartPartNumberExceedingAllowedRange() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); byte[] data = "data".getBytes(UTF_8); @@ -469,7 +471,7 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception { public void testCommitPartAfterCompleteUpload() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4); @@ -530,7 +532,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String 
uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // Do not close output stream. @@ -550,7 +552,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -567,7 +569,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { ozoneManager.getMetadataManager().getBucketTable().get(buckKey); BucketLayout bucketLayout = buckInfo.getBucketLayout(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -599,7 +601,7 @@ public void testListMultipartUploadParts() throws Exception { keyName = parentDir + "file-ABC"; Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -688,7 +690,7 @@ private String verifyPartNames(Map partsMap, int index, public void testListMultipartUploadPartsWithContinuation() throws Exception { Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -752,7 +754,7 @@ public void testListPartsWithInvalidInputs(int partNumberMarker, int maxParts, S @Test public void 
testListPartsWithPartMarkerGreaterThanPartCount() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -795,11 +797,11 @@ public void testListMultipartUpload() throws Exception { keys.add(key3); // Initiate multipart upload - String uploadID1 = initiateMultipartUpload(bucket, key1, RATIS, + String uploadID1 = initiateMultipartUploadWithAsserts(bucket, key1, RATIS, ONE); - String uploadID2 = initiateMultipartUpload(bucket, key2, RATIS, + String uploadID2 = initiateMultipartUploadWithAsserts(bucket, key2, RATIS, ONE); - String uploadID3 = initiateMultipartUpload(bucket, key3, RATIS, + String uploadID3 = initiateMultipartUploadWithAsserts(bucket, key3, RATIS, ONE); // Upload Parts @@ -854,6 +856,103 @@ public void testListMultipartUpload() throws Exception { assertEquals(0, expectedList.size()); } + @Test + void testGetAllPartsWhenZeroPartNumber() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + 
s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + + OzoneKeyDetails s3KeyDetailsWithAllParts = ozClient.getProxy() + .getS3KeyDetails(s3Bucket.getName(), keyName, 0); + List ozoneKeyLocations = s3KeyDetailsWithAllParts.getOzoneKeyLocations(); + assertEquals(6, ozoneKeyLocations.size()); + } + + @Test + void testGetParticularPart() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + + OzoneKeyDetails s3KeyDetailsOneParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 1); + assertEquals(2, s3KeyDetailsOneParts.getOzoneKeyLocations().size()); + + OzoneKeyDetails s3KeyDetailsTwoParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 2); + assertEquals(2, s3KeyDetailsTwoParts.getOzoneKeyLocations().size()); + + OzoneKeyDetails s3KeyDetailsThreeParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 3); + assertEquals(2, s3KeyDetailsThreeParts.getOzoneKeyLocations().size()); + } + + @Test + void testGetNotExistedPart() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = 
store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + + OzoneKeyDetails s3KeyDetailsWithNotExistedParts = ozClient.getProxy() + .getS3KeyDetails(s3Bucket.getName(), keyName, 4); + List ozoneKeyLocations = s3KeyDetailsWithNotExistedParts.getOzoneKeyLocations(); + assertEquals(0, ozoneKeyLocations.size()); + } + private String verifyUploadedPart(String uploadID, String partName, OMMetadataManager metadataMgr) throws IOException { OzoneManager ozoneManager = cluster.getOzoneManager(); @@ -891,11 +990,10 @@ private String verifyUploadedPart(String uploadID, String partName, return multipartKey; } - private String initiateMultipartUpload(OzoneBucket oBucket, String kName, - ReplicationType replicationType, ReplicationFactor replicationFactor) - throws IOException { - OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, - replicationType, replicationFactor); + private String initiateMultipartUploadWithAsserts( + OzoneBucket oBucket, String kName, ReplicationType replicationType, ReplicationFactor replicationFactor + ) throws IOException { + OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, replicationType, replicationFactor); assertNotNull(multipartInfo); String 
uploadID = multipartInfo.getUploadID(); @@ -907,6 +1005,13 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, return uploadID; } + private String initiateMultipartUpload( + OzoneBucket oBucket, String kName, ReplicationType replicationType, ReplicationFactor replicationFactor + ) throws IOException { + OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, replicationType, replicationFactor); + return multipartInfo.getUploadID(); + } + private Pair uploadPart(OzoneBucket oBucket, String kName, String uploadID, int partNumber, byte[] data) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index e9c9b946c8e..7227918aa04 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -1518,7 +1518,107 @@ public void testRefreshPipelineException() throws Exception { assertEquals(errorMessage, omEx.getMessage()); } - /** + @Test + void testGetAllPartsWhenZeroPartNumber() throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(0) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(5, locationList.size()); + for (int i = 0; i < 5; i++) { + assertEquals(i, locationList.get(i).getPartNumber()); + } + } + + 
@Test + void testGetParticularPart() throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(3) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(1, locationList.size()); + assertEquals(3, locationList.get(0).getPartNumber()); + } + + @Test + void testGetNotExistedPart() throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(99) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(0, locationList.size()); + } + + private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException { + List locationInfoGroups = new ArrayList<>(); + List locationInfoList = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + OmKeyLocationInfo locationInfo1 = new OmKeyLocationInfo.Builder() + .setBlockID(new BlockID(i, i)) + .setPartNumber(i) + .build(); + locationInfoList.add(locationInfo1); + } + + OmKeyLocationInfoGroup locationInfoGroup = new OmKeyLocationInfoGroup(1, locationInfoList); + 
locationInfoGroups.add(locationInfoGroup); + locationInfoGroup.setMultipartKey(true); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setKeyName(keyName) + .setBucketName(BUCKET_NAME) + .setVolumeName(volume) + .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) + .setOmKeyLocationInfos(locationInfoGroups) + .build(); + + String key = String.format("/%s/%s/%s", volume, BUCKET_NAME, keyName); + metadataManager.getKeyTable(BucketLayout.LEGACY).put(key, omKeyInfo); + } + + /** * Get Random pipeline. * @return pipeline */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java index ab56af670b3..661afaf0772 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -27,6 +27,7 @@ import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; import com.amazonaws.services.s3.model.CreateBucketRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.Grantee; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; @@ -736,6 +737,65 @@ public void testListParts(@TempDir Path tempDir) throws Exception { } } + @Test + public void testGetParticularPart(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (15 * MB)); + + multipartUpload(bucketName, keyName, 
multipartUploadFile, 5 * MB, null, null, null); + + GetObjectRequest getObjectRequestAll = new GetObjectRequest(bucketName, keyName); + getObjectRequestAll.setPartNumber(0); + S3Object s3ObjectAll = s3Client.getObject(getObjectRequestAll); + long allPartContentLength = s3ObjectAll.getObjectMetadata().getContentLength(); + + GetObjectRequest getObjectRequestOne = new GetObjectRequest(bucketName, keyName); + getObjectRequestOne.setPartNumber(1); + S3Object s3ObjectOne = s3Client.getObject(getObjectRequestOne); + long partOneContentLength = s3ObjectOne.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partOneContentLength); + + GetObjectRequest getObjectRequestTwo = new GetObjectRequest(bucketName, keyName); + getObjectRequestTwo.setPartNumber(2); + S3Object s3ObjectTwo = s3Client.getObject(getObjectRequestTwo); + long partTwoContentLength = s3ObjectTwo.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partTwoContentLength); + + GetObjectRequest getObjectRequestThree = new GetObjectRequest(bucketName, keyName); + getObjectRequestThree.setPartNumber(3); + S3Object s3ObjectThree = s3Client.getObject(getObjectRequestThree); + long partThreeContentLength = s3ObjectThree.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partThreeContentLength); + + assertEquals(allPartContentLength, (partOneContentLength + partTwoContentLength + partThreeContentLength)); + } + + @Test + public void testGetNotExistedPart(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (15 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, null, null); + + GetObjectRequest getObjectRequestOne = new GetObjectRequest(bucketName, 
keyName); + getObjectRequestOne.setPartNumber(4); + S3Object s3ObjectOne = s3Client.getObject(getObjectRequestOne); + long partOneContentLength = s3ObjectOne.getObjectMetadata().getContentLength(); + assertEquals(0, partOneContentLength); + } + @Test public void testListPartsNotFound() { final String bucketName = getBucketName(); From 487a563d545186ccc5fc4e38c14964af5799c849 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 26 Dec 2024 10:23:44 +0300 Subject: [PATCH 5/8] HDDS-11699. Fix review notices. --- .../hadoop/ozone/OzoneManagerVersion.java | 3 ++ .../hadoop/ozone/client/rpc/RpcClient.java | 16 ++++++++- .../src/main/smoketest/s3/objectputget.robot | 33 +++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 2d0b2bb56fd..d46cdeaf1fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -50,6 +50,9 @@ public enum OzoneManagerVersion implements ComponentVersion { S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), + S3_PART_AWARE_GET(10, "OzoneManager version that supports S3 get for a specific multipart " + + "upload part number"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 6e7892a7e46..b499f1675a9 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1765,7 +1765,21 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName) @Override public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, int partNumber) throws IOException { - OmKeyInfo keyInfo = getS3PartKeyInfo(bucketName, keyName, partNumber); + OmKeyInfo keyInfo; + if (omVersion.compareTo(OzoneManagerVersion.S3_PART_AWARE_GET) >= 0) { + keyInfo = getS3PartKeyInfo(bucketName, keyName, partNumber); + } else { + keyInfo = getS3KeyInfo(bucketName, keyName, false); + List filteredKeyLocationInfo = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() + .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == + partNumber) + .collect(Collectors.toList()); + keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false); + keyInfo.setDataSize(filteredKeyLocationInfo.stream() + .mapToLong(OmKeyLocationInfo::getLength) + .sum()); + } return getOzoneKeyDetails(keyInfo); } diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 82a985f1d50..4e9bcfe1a0a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -270,3 +270,36 @@ Create key twice with different content and expect different ETags Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences Execute rm -rf /tmp/file1 Execute rm -rf /tmp/file2 + +Create&Download big file by multipart upload and get file via part numbers + Execute head -c 10000000 /tmp/big_file + ${result} Execute AWSS3CliDebug cp /tmp/big_file s3://${BUCKET}/ + ${get_part_1_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_1 --part-number 1 + ${part_1_size} = Execute and checkrc echo '${get_part_1_response}' | jq -r '.ContentLength' 0 + Should contain ${get_part_1_response} 
\"PartsCount\": 2 + ${get_part_2_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_2 --part-number 2 + ${part_2_size} = Execute and checkrc echo '${get_part_2_response}' | jq -r '.ContentLength' 0 + Should contain ${get_part_2_response} \"PartsCount\": 2 + + Should Be Equal As Integers 10000000 ${${part_1_size} + ${part_2_size}} + + ${get_part_3_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_3 --part-number 3 + Should contain ${get_part_3_response} \"ContentLength\": 0 + Should contain ${get_part_3_response} \"PartsCount\": 2 + # clean up + Execute AWSS3Cli rm s3://${BUCKET}/big_file + Execute rm -rf /tmp/big_file + Execute rm -rf /tmp/big_file_1 + Execute rm -rf /tmp/big_file_2 + Execute rm -rf /tmp/big_file_3 + +Create&Download big file by multipart upload and get file not existed part number + Execute head -c 10000000 </dev/urandom >/tmp/big_file + ${result} Execute AWSS3CliDebug cp /tmp/big_file s3://${BUCKET}/ + ${get_part_99_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_1 --part-number 99 + Should contain ${get_part_99_response} \"ContentLength\": 0 + Should contain ${get_part_99_response} \"PartsCount\": 2 + # clean up + Execute AWSS3Cli rm s3://${BUCKET}/big_file + Execute rm -rf /tmp/big_file + Execute rm -rf /tmp/big_file_1 \ No newline at end of file From 3fc4af1a475b8b77d4e4b53ec287c490affc1f22 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 9 Jan 2025 12:11:02 +0300 Subject: [PATCH 6/8] HDDS-11699. 
Fix review notices --- ...TestOzoneClientMultipartUploadWithFSO.java | 2 ++ .../hadoop/ozone/om/KeyManagerImpl.java | 21 +++++-------------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index f2e9e8777a6..58183e87705 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -912,6 +912,8 @@ void testGetParticularPart() throws Exception { s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); +// OzoneKeyLocations size is 2 because part size is 5MB and ozone.scm.block.size in ozone-site.xml +// for integration-test is 4MB OzoneKeyDetails s3KeyDetailsOneParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 1); assertEquals(2, s3KeyDetailsOneParts.getOzoneKeyLocations().size()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c1b98e81f72..8e3bbb47c3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -513,27 +513,16 @@ private OmKeyInfo readKeyInfo(OmKeyArgs args, BucketLayout bucketLayout) if (partNumberParam > 0) { OmKeyLocationInfoGroup latestLocationVersion = value.getLatestVersionLocations(); if (latestLocationVersion != null && latestLocationVersion.isMultipartKey()) { - List currentLocations = - value.getKeyLocationVersions().get(value.getKeyLocationVersions().size() - 1) - 
.getLocationList() + List currentLocations = latestLocationVersion.getBlocksLatestVersionOnly() .stream() .filter(it -> it.getPartNumber() == partNumberParam) .collect(Collectors.toList()); - value.setKeyLocationVersions( - Collections.singletonList( - new OmKeyLocationInfoGroup( - latestLocationVersion.getVersion(), - currentLocations, - true - ) - ) - ); - - long dataLength = currentLocations.stream() + value.updateLocationInfoList(currentLocations, true, true); + + value.setDataSize(currentLocations.stream() .mapToLong(BlockLocationInfo::getLength) - .sum(); - value.setDataSize(dataLength); + .sum()); } } return value; From 83be062507899bfe146533f3405368d0f52d11fa Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 9 Jan 2025 12:14:33 +0300 Subject: [PATCH 7/8] HDDS-11699. Fix review notices --- .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index b499f1675a9..2f28d66d7a7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1775,7 +1775,7 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == partNumber) .collect(Collectors.toList()); - keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false); + keyInfo.updateLocationInfoList(filteredKeyLocationInfo, true, true); keyInfo.setDataSize(filteredKeyLocationInfo.stream() .mapToLong(OmKeyLocationInfo::getLength) .sum()); From cf040a8d729f5d17cc0c896baeba5457b839a0a4 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 9 Jan 2025 18:04:59 +0300 Subject: [PATCH 8/8] HDDS-11699. 
Fix failed tests --- .../java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 7227918aa04..c3ceb0f6209 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -1602,7 +1602,7 @@ private void initKeyTableForMultipartTest(String keyName, String volume) throws locationInfoList.add(locationInfo1); } - OmKeyLocationInfoGroup locationInfoGroup = new OmKeyLocationInfoGroup(1, locationInfoList); + OmKeyLocationInfoGroup locationInfoGroup = new OmKeyLocationInfoGroup(0, locationInfoList); locationInfoGroups.add(locationInfoGroup); locationInfoGroup.setMultipartKey(true);