diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index ff054c350956..0b26abafbfba 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -27,6 +27,7 @@
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
+import java.util.OptionalLong;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
@@ -34,6 +35,7 @@
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.storage.StreamBuffer;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -255,7 +257,10 @@ void commitKey(long offset) throws IOException {
commitUploadPartInfo =
omClient.commitMultipartUploadPart(buildKeyArgs(), openID);
} else {
- omClient.commitKey(buildKeyArgs(), openID);
+ final OptionalLong modificationTime = omClient.commitKeyWithModificationTime(buildKeyArgs(), openID);
+ if (modificationTime.isPresent()) {
+ metadata.put(OzoneConsts.MODIFICATION_TIME, String.valueOf(modificationTime.getAsLong()));
+ }
}
} else {
LOG.warn("Closing KeyDataStreamOutput, but key args is null");
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index f3b98626f093..3976514606ee 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -28,6 +28,7 @@
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
+import java.util.OptionalLong;
import java.util.concurrent.ExecutorService;
import java.util.function.Supplier;
import org.apache.hadoop.hdds.client.ContainerBlockID;
@@ -39,6 +40,7 @@
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.storage.BufferPool;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -330,7 +332,10 @@ void commitKey(long offset) throws IOException {
commitUploadPartInfo =
omClient.commitMultipartUploadPart(buildKeyArgs(), openID);
} else {
- omClient.commitKey(buildKeyArgs(), openID);
+ final OptionalLong modificationTime = omClient.commitKeyWithModificationTime(buildKeyArgs(), openID);
+ if (modificationTime.isPresent()) {
+ metadata.put(OzoneConsts.MODIFICATION_TIME, String.valueOf(modificationTime.getAsLong()));
+ }
}
} else {
LOG.warn("Closing KeyOutputStream, but key args is null");
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
index 93774a82dc5a..ed5ab1f0e0b3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.ozone.om.helpers;
+import java.util.OptionalLong;
+
/**
* This class holds information about the response from commit multipart
* upload part request.
@@ -27,9 +29,16 @@ public class OmMultipartCommitUploadPartInfo {
private final String eTag;
+ private final Long modificationTime;
+
public OmMultipartCommitUploadPartInfo(String partName, String eTag) {
+ this(partName, eTag, null);
+ }
+
+ public OmMultipartCommitUploadPartInfo(String partName, String eTag, Long modificationTime) {
this.partName = partName;
this.eTag = eTag;
+ this.modificationTime = modificationTime;
}
public String getETag() {
@@ -39,4 +48,8 @@ public String getETag() {
public String getPartName() {
return partName;
}
+
+ public OptionalLong getModificationTime() {
+ return modificationTime == null ? OptionalLong.empty() : OptionalLong.of(modificationTime);
+ }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 5e4dafb925bc..27a6a8f4e85e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -22,6 +22,7 @@
import java.io.IOException;
import java.util.List;
import java.util.Map;
+import java.util.OptionalLong;
import java.util.UUID;
import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
@@ -251,6 +252,17 @@ default void commitKey(OmKeyArgs args, long clientID)
"this to be implemented, as write requests use a new approach.");
}
+ /**
+ * Commit a key and optionally return the stored modification time (epoch millis).
+ *
+ * This is backward compatible with older OM versions which do not return
+ * the value in {@code CommitKeyResponse}.
+ */
+ default OptionalLong commitKeyWithModificationTime(OmKeyArgs args, long clientID) throws IOException {
+ commitKey(args, clientID);
+ return OptionalLong.empty();
+ }
+
/**
* Synchronize the key length. This will make the change from the client
* visible. The client is identified by the clientID.
@@ -1096,7 +1108,6 @@ default CancelPrepareResponse cancelOzoneManagerPrepare() throws IOException {
EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp,
boolean writeToRatis) throws IOException;
-
/**
* Start the lease recovery of a file.
*
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/S3Auth.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/S3Auth.java
index fa023dfc8119..577339c96ac3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/S3Auth.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/S3Auth.java
@@ -29,6 +29,8 @@ public class S3Auth {
private String userPrincipal;
// Optional STS session token when using temporary credentials
private String sessionToken;
+  // The S3 action without the s3: prefix (e.g. PutObject), set by the S3 Gateway for use in
+ private String s3Action;
public S3Auth(final String stringToSign,
final String signature,
@@ -67,4 +69,12 @@ public String getSessionToken() {
public void setSessionToken(String sessionToken) {
this.sessionToken = sessionToken;
}
+
+ public String getS3Action() {
+ return s3Action;
+ }
+
+ public void setS3Action(String s3Action) {
+ this.s3Action = s3Action;
+ }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index b42b0e6ddf87..a0f3f56c8937 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -38,6 +38,7 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.OptionalLong;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
@@ -328,6 +329,9 @@ private OMResponse submitRequest(OMRequest omRequest)
if (threadLocalS3Auth.get().getSessionToken() != null) {
s3AuthBuilder.setSessionToken(threadLocalS3Auth.get().getSessionToken());
}
+ if (threadLocalS3Auth.get().getS3Action() != null) {
+ s3AuthBuilder.setS3Action(threadLocalS3Auth.get().getS3Action());
+ }
builder.setS3Authentication(s3AuthBuilder.build());
}
@@ -818,6 +822,11 @@ public void commitKey(OmKeyArgs args, long clientId)
updateKey(args, clientId, false, false);
}
+ @Override
+ public OptionalLong commitKeyWithModificationTime(OmKeyArgs args, long clientId) throws IOException {
+ return updateKeyAndGetModificationTime(args, clientId, false, false);
+ }
+
@Override
public void recoverKey(OmKeyArgs args, long clientId)
throws IOException {
@@ -839,6 +848,12 @@ public static void setReplicationConfig(ReplicationConfig replication,
private void updateKey(OmKeyArgs args, long clientId, boolean hsync, boolean recovery)
throws IOException {
+ // Preserve legacy behavior (ignore response payload).
+ updateKeyAndGetModificationTime(args, clientId, hsync, recovery);
+ }
+
+ private OptionalLong updateKeyAndGetModificationTime(OmKeyArgs args, long clientId, boolean hsync, boolean recovery)
+ throws IOException {
CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
List locationInfoList = args.getLocationInfoList();
Objects.requireNonNull(locationInfoList, "locationInfoList == null");
@@ -864,7 +879,11 @@ private void updateKey(OmKeyArgs args, long clientId, boolean hsync, boolean rec
.setCommitKeyRequest(req)
.build();
- handleError(submitRequest(omRequest));
+ final OMResponse resp = handleError(submitRequest(omRequest));
+ if (resp.hasCommitKeyResponse() && resp.getCommitKeyResponse().hasModificationTime()) {
+ return OptionalLong.of(resp.getCommitKeyResponse().getModificationTime());
+ }
+ return OptionalLong.empty();
}
@Override
@@ -1703,10 +1722,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
handleError(submitRequest(omRequest))
.getCommitMultiPartUploadResponse();
- OmMultipartCommitUploadPartInfo info = new
- OmMultipartCommitUploadPartInfo(response.getPartName(),
- response.getETag());
- return info;
+ final Long modificationTime = response.hasModificationTime() ? response.getModificationTime() : null;
+ return new OmMultipartCommitUploadPartInfo(response.getPartName(), response.getETag(), modificationTime);
}
@Override
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 9bb0d801ee7b..41c8ebd27dcc 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1572,7 +1572,8 @@ message CommitKeyRequest {
}
message CommitKeyResponse {
-
+  // Modification time of the committed key in epoch millis, as assigned by OM during preExecute.
+ optional uint64 modificationTime = 1;
}
message AllocateBlockRequest {
@@ -1777,6 +1778,8 @@ message MultipartCommitUploadPartResponse {
optional string partName = 1;
// This one is returned as Etag for S3.
optional string eTag = 2;
+  // Modification time of the committed part key in epoch millis, as assigned by OM during preExecute.
+ optional uint64 modificationTime = 3;
}
message MultipartUploadCompleteRequest {
@@ -2311,6 +2314,17 @@ message S3Authentication {
// If present, indicates this request uses STS temporary credentials
// and carries the base64-encoded session token for validation.
optional string sessionToken = 4;
+ // The following fields are resolved from the STS session token by OM.
+ // They are used to enforce STS session policies during Ratis apply.
+ // They must be written or cleared by the OM leader when the token is validated.
+ optional string resolvedStsSessionPolicy = 5;
+ optional string resolvedStsRoleArn = 6;
+ optional string resolvedStsOriginalAccessKeyId = 7;
+ optional string resolvedStsTempAccessKeyId = 8;
+ optional string resolvedStsSecretKeyId = 9;
+ // S3 action without the s3: prefix for this request (e.g. GetObject), set by S3 Gateway for use
+ // in finer-grained STS permissions.
+ optional string s3Action = 10;
}
message RecoverLeaseRequest {
diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock
index 0271bd8a20f1..15817492a551 100644
--- a/hadoop-ozone/interface-client/src/main/resources/proto.lock
+++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock
@@ -5803,7 +5803,15 @@
]
},
{
- "name": "CommitKeyResponse"
+ "name": "CommitKeyResponse",
+ "fields": [
+ {
+ "id": 1,
+ "name": "modificationTime",
+ "type": "uint64",
+ "optional": true
+ }
+ ]
},
{
"name": "AllocateBlockRequest",
@@ -6372,6 +6380,12 @@
"name": "eTag",
"type": "string",
"optional": true
+ },
+ {
+ "id": 3,
+ "name": "modificationTime",
+ "type": "uint64",
+ "optional": true
}
]
},
@@ -8656,4 +8670,4 @@
}
}
]
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
index b14f01cf6ba0..1ef815d67555 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.protocolPB.grpc.GrpcClientConstants;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
import org.apache.hadoop.ozone.security.STSTokenIdentifier;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
@@ -236,9 +237,7 @@ public List listStatus(OmKeyArgs args, boolean recursive,
try {
if (isAclEnabled) {
if (isStsS3Request()) {
- // We need to be able to tell the difference between being able to download a file and merely seeing the file
- // name in a list. Use READ for download ability and LIST (here) for listing.
- // When listPrefix is set (original S3 ListObjects prefix), authorize LIST on that prefix for the whole
+ // When listPrefix is set (original S3 ListObjects prefix), authorize READ on that prefix for the whole
// listing, including FSO traversal where keyName is an internal directory (e.g. userA) under prefix user.
final String listPrefix = args.getListPrefix();
final String keyName = args.getKeyName();
@@ -258,7 +257,7 @@ public List listStatus(OmKeyArgs args, boolean recursive,
} else {
aclKey = "*";
}
- checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.LIST, bucket.realVolume(), bucket.realBucket(), aclKey);
+ checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, bucket.realVolume(), bucket.realBucket(), aclKey);
} else {
checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ,
bucket, args.getKeyName());
@@ -304,12 +303,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
try {
if (isAclEnabled) {
- if (isStsS3Request()) {
- checkAcls(getResourceType(args), StoreType.OZONE, ACLType.LIST, bucket, args.getKeyName());
- } else {
- checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ,
- bucket, args.getKeyName());
- }
+ checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, bucket, args.getKeyName());
}
metrics.incNumGetFileStatus();
return keyManager.getFileStatus(args, getClientAddress());
@@ -384,7 +378,7 @@ public ListKeysResult listKeys(String volumeName, String bucketName,
final String aclKey = (keyPrefix == null || keyPrefix.isEmpty()) ? "*" : keyPrefix;
captureLatencyNs(
perfMetrics.getListKeysAclCheckLatencyNs(), () -> checkAcls(
- ResourceType.KEY, StoreType.OZONE, ACLType.LIST, bucket.realVolume(), bucket.realBucket(), aclKey));
+ ResourceType.KEY, StoreType.OZONE, ACLType.READ, bucket.realVolume(), bucket.realBucket(), aclKey));
} else {
captureLatencyNs(perfMetrics.getListKeysAclCheckLatencyNs(), () ->
checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST,
@@ -634,7 +628,8 @@ public boolean checkAcls(ResourceType resType, StoreType storeType,
public boolean checkAcls(OzoneObj obj, RequestContext context,
boolean throwIfPermissionDenied) throws OMException {
- final RequestContext normalizedRequestContext = maybeAttachSessionPolicyFromThreadLocal(context);
+ final RequestContext normalizedRequestContext = maybeAttachS3ActionFromThreadLocal(
+ maybeAttachSessionPolicyFromThreadLocal(context));
if (!captureLatencyNs(perfMetrics::setCheckAccessLatencyNs,
() -> accessAuthorizer.checkAccess(obj, normalizedRequestContext))) {
@@ -692,6 +687,22 @@ private RequestContext maybeAttachSessionPolicyFromThreadLocal(RequestContext co
.build();
}
+ /**
+   * Attaches the S3 action to the RequestContext if an S3Authentication carrying an S3 action is
+   * found in the Ozone Manager thread local; otherwise returns the RequestContext unchanged.
+   * @param context the original RequestContext
+   * @return the RequestContext, either unchanged or with the S3 action attached
+ */
+ private RequestContext maybeAttachS3ActionFromThreadLocal(RequestContext context) {
+ final S3Authentication s3Authentication = OzoneManager.getS3Auth();
+ if (s3Authentication == null || !s3Authentication.hasS3Action()) {
+ return context;
+ }
+ return context.toBuilder()
+ .setS3Action(s3Authentication.getS3Action())
+ .build();
+ }
+
static String getClientAddress() {
String clientMachine = Server.getRemoteAddress();
if (clientMachine == null) { //not a RPC client
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
index 09a530ab6cc7..58f70af236c8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
@@ -55,6 +55,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler;
import org.apache.hadoop.ozone.protocolPB.RequestHandler;
+import org.apache.hadoop.ozone.security.STSSecurityUtil;
+import org.apache.hadoop.ozone.security.STSTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -617,7 +619,34 @@ public void close() {
*/
@VisibleForTesting
OMResponse runCommand(OMRequest request, TermIndex termIndex) {
+ boolean isS3AuthThreadLocalSet = false;
+ boolean isStsThreadLocalSet = false;
try {
+ if (ozoneManager.isSecurityEnabled() && request.hasS3Authentication()) {
+ // STS token verification runs on the leader RPC path so we don't need to recheck here on the apply
+ // after the log is committed
+ STSSecurityUtil.ensureResolvedStsFieldsInvariants(request);
+
+ final OzoneManagerProtocolProtos.S3Authentication s3Auth = request.getS3Authentication();
+ // ThreadLocal carries S3 action for OmMetadataReader.
+ OzoneManager.setS3Auth(s3Auth);
+ isS3AuthThreadLocalSet = true;
+
+ if (s3Auth.hasSessionToken() && !s3Auth.getSessionToken().isEmpty()) {
+ // ThreadLocal carries session policy for OmMetadataReader
+ final STSTokenIdentifier rehydratedTokenIdentifier = new STSTokenIdentifier(
+ s3Auth.hasResolvedStsTempAccessKeyId() ? s3Auth.getResolvedStsTempAccessKeyId() : "",
+ s3Auth.hasResolvedStsOriginalAccessKeyId() ? s3Auth.getResolvedStsOriginalAccessKeyId() : "",
+ s3Auth.hasResolvedStsRoleArn() ? s3Auth.getResolvedStsRoleArn() : "",
+ java.time.Instant.MAX, // ensure it deterministically is not expired
+ "", // no secretAccessKey needed
+ s3Auth.hasResolvedStsSessionPolicy() ? s3Auth.getResolvedStsSessionPolicy() : "",
+ null // no encryption key needed
+ );
+ OzoneManager.setStsTokenIdentifier(rehydratedTokenIdentifier);
+ isStsThreadLocalSet = true;
+ }
+ }
ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex);
final OMClientResponse omClientResponse = handler.handleWriteRequest(
request, context, ozoneManagerDoubleBuffer);
@@ -636,6 +665,13 @@ OMResponse runCommand(OMRequest request, TermIndex termIndex) {
// For any Runtime exceptions, terminate OM.
String errorMessage = "Request " + request + " failed with exception";
ExitUtils.terminate(1, errorMessage, e, LOG);
+ } finally {
+ if (isS3AuthThreadLocalSet) {
+ OzoneManager.setS3Auth(null);
+ }
+ if (isStsThreadLocalSet) {
+ OzoneManager.setStsTokenIdentifier(null);
+ }
}
return null;
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 0420eef2fd5d..1d4816008454 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -27,6 +27,7 @@
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
+import java.util.UUID;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ipc_.ProtobufRpcEngine;
@@ -54,6 +55,7 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.STSTokenIdentifier;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
@@ -112,15 +114,50 @@ public OMClientRequest(OMRequest omRequest) {
*/
public OMRequest preExecute(OzoneManager ozoneManager)
throws IOException {
- LayoutVersion layoutVersion = LayoutVersion.newBuilder()
+ final LayoutVersion layoutVersion = LayoutVersion.newBuilder()
.setVersion(ozoneManager.getVersionManager().getMetadataLayoutVersion())
.build();
- omRequest = getOmRequest().toBuilder()
+
+ final OMRequest.Builder requestBuilder = getOmRequest().toBuilder()
.setUserInfo(getUserIfNotExists(ozoneManager))
- .setLayoutVersion(layoutVersion).build();
+ .setLayoutVersion(layoutVersion);
+
+ if (requestBuilder.hasS3Authentication()) {
+ requestBuilder.setS3Authentication(
+ resolveS3Authentication(requestBuilder.getS3Authentication(), OzoneManager.getStsTokenIdentifier()));
+ }
+
+ omRequest = requestBuilder.build();
return omRequest;
}
+ private static OzoneManagerProtocolProtos.S3Authentication resolveS3Authentication(
+ OzoneManagerProtocolProtos.S3Authentication s3Auth, STSTokenIdentifier stsTokenIdentifier) {
+ final OzoneManagerProtocolProtos.S3Authentication.Builder s3AuthBuilder = s3Auth.toBuilder();
+
+ if (s3Auth.hasSessionToken() && !s3Auth.getSessionToken().isEmpty() && stsTokenIdentifier != null) {
+ s3AuthBuilder.setResolvedStsSessionPolicy(
+ StringUtils.defaultString(stsTokenIdentifier.getSessionPolicy()));
+ s3AuthBuilder.setResolvedStsRoleArn(
+ StringUtils.defaultString(stsTokenIdentifier.getRoleArn()));
+ s3AuthBuilder.setResolvedStsOriginalAccessKeyId(
+ StringUtils.defaultString(stsTokenIdentifier.getOriginalAccessKeyId()));
+ s3AuthBuilder.setResolvedStsTempAccessKeyId(
+ StringUtils.defaultString(stsTokenIdentifier.getTempAccessKeyId()));
+ final UUID secretKeyId = stsTokenIdentifier.getSecretKeyId();
+ s3AuthBuilder.setResolvedStsSecretKeyId(
+ secretKeyId != null ? secretKeyId.toString() : "");
+ } else {
+ s3AuthBuilder.clearResolvedStsSessionPolicy();
+ s3AuthBuilder.clearResolvedStsRoleArn();
+ s3AuthBuilder.clearResolvedStsOriginalAccessKeyId();
+ s3AuthBuilder.clearResolvedStsTempAccessKeyId();
+ s3AuthBuilder.clearResolvedStsSecretKeyId();
+ }
+
+ return s3AuthBuilder.build();
+ }
+
/**
* Performs any request specific failure handling during request
* submission. An example of this would be an undo of any steps
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
index 97dca83c1978..678c4ba0dc86 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
@@ -54,14 +54,15 @@ public class OMBucketSetAclRequest extends OMBucketAclRequest {
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
- long modificationTime = Time.now();
- OzoneManagerProtocolProtos.SetAclRequest.Builder setAclRequestBuilder =
+ final long modificationTime = Time.now();
+ final OzoneManagerProtocolProtos.SetAclRequest.Builder setAclRequestBuilder =
getOmRequest().getSetAclRequest().toBuilder()
.setModificationTime(modificationTime);
- return getOmRequest().toBuilder()
+ // super.preExecute resolves S3Authentication (STS) for Ratis apply. Merge SetAclRequest changes on top.
+ final OMRequest request = super.preExecute(ozoneManager);
+ return request.toBuilder()
.setSetAclRequest(setAclRequestBuilder)
- .setUserInfo(getUserInfo())
.build();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index be0935d909d8..62fd23fa3303 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -62,6 +62,7 @@
import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
@@ -403,6 +404,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
omBucketInfo.incrUsedBytes(correctedSpace);
+ omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder()
+ .setModificationTime(commitKeyArgs.getModificationTime())
+ .build());
+
omClientResponse = new OMKeyCommitResponse(omResponse.build(),
omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject(),
oldKeyVersionsToDeleteMap, isHSync, newOpenKeyInfo, dbOpenKeyToDeleteKey, openKeyToDelete);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index 25b5a4b15d41..b3d06b3173b6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseWithFSO;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -347,6 +348,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
omBucketInfo.incrUsedBytes(correctedSpace);
+ omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder()
+ .setModificationTime(commitKeyArgs.getModificationTime())
+ .build());
+
omClientResponse = new OMKeyCommitResponseWithFSO(omResponse.build(),
omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject(),
oldKeyVersionsToDeleteMap, volumeId, isHSync, newOpenKeyInfo, dbOpenKeyToDeleteKey, openKeyToDelete);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index ac123ff680ac..fd9eead76d3e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -268,6 +268,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
if (eTag != null) {
commitResponseBuilder.setETag(eTag);
}
+ commitResponseBuilder.setModificationTime(keyArgs.getModificationTime());
omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder);
omClientResponse =
getOmClientResponse(ozoneManager, keyVersionsToDeleteMap, openKey,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java
index c414708cebee..2212ad6db797 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java
@@ -31,7 +31,9 @@
import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey;
import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
@@ -179,5 +181,44 @@ static void ensureEssentialFieldsArePresentInToken(STSTokenIdentifier stsTokenId
throw new SecretManager.InvalidToken("Invalid STS token - secretAccessKey is null/empty");
}
}
+
+ /**
+ * Ensures STS-related {@link S3Authentication} fields are structurally consistent on the Ratis
+ * apply path. Cryptographic validation (signature, expiry, secret key lookup) runs on the leader
+ * RPC path (e.g. {@code S3SecurityUtil.validateS3Credential}). This method performs no cryptographic
+ * checks and does not contact {@link SecretKeyClient}, keeping the apply thread deterministic and lightweight.
+ *
+ * @param request OM request possibly containing S3 authentication
+ * @throws OMException if resolved fields and session token presence are inconsistent
+ */
+ public static void ensureResolvedStsFieldsInvariants(OzoneManagerProtocolProtos.OMRequest request)
+ throws OMException {
+ if (!request.hasS3Authentication()) {
+ return;
+ }
+
+ final S3Authentication s3Auth = request.getS3Authentication();
+ final boolean hasSessionToken = s3Auth.hasSessionToken() && !s3Auth.getSessionToken().isEmpty();
+
+ if (!hasSessionToken) {
+ // If sessionToken is missing/empty, resolved fields must be empty.
+ if (s3Auth.hasResolvedStsSessionPolicy() || s3Auth.hasResolvedStsRoleArn() ||
+ s3Auth.hasResolvedStsOriginalAccessKeyId() || s3Auth.hasResolvedStsTempAccessKeyId() ||
+ s3Auth.hasResolvedStsSecretKeyId()) {
+ throw new OMException("Resolved STS fields must be empty when sessionToken is not present", INVALID_TOKEN);
+ }
+ return;
+ }
+
+ ensureResolvedFieldsArePresent(s3Auth);
+ }
+
+ private static void ensureResolvedFieldsArePresent(S3Authentication s3Auth) throws OMException {
+ if (!s3Auth.hasResolvedStsSessionPolicy() || !s3Auth.hasResolvedStsRoleArn() ||
+ !s3Auth.hasResolvedStsOriginalAccessKeyId() || !s3Auth.hasResolvedStsTempAccessKeyId() ||
+ !s3Auth.hasResolvedStsSecretKeyId()) {
+ throw new OMException("Resolved STS fields must be present when sessionToken is present", INVALID_TOKEN);
+ }
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMetadataReader.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMetadataReader.java
index 8403d2203e01..6a6dea551f51 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMetadataReader.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMetadataReader.java
@@ -23,7 +23,6 @@
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
@@ -78,8 +77,9 @@ public class TestOMMetadataReader {
private static final long MAX_KEYS = 100L;
@AfterEach
- public void clearStsThreadLocal() {
+ public void clearOmThreadLocals() {
OzoneManager.setStsTokenIdentifier(null);
+ OzoneManager.setS3Auth(null);
}
@Test
@@ -145,7 +145,58 @@ public void testNoSessionPolicyWhenThreadLocalIsNull() throws Exception {
}
@Test
- public void testListStatusUsesListAclForStsS3Request() throws Exception {
+ public void testCheckAclsAttachesS3ActionFromThreadLocal() throws Exception {
+ OzoneManager.setS3Auth(S3Authentication.newBuilder()
+ .setAccessId(ACCESS_KEY_ID)
+ .setS3Action("GetObject")
+ .build());
+
+ final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue();
+ final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer);
+
+ final RequestContext contextWithoutS3Action = createTestRequestContext();
+ final OzoneObj obj = createTestOzoneObj();
+
+ assertTrue(omMetadataReader.checkAcls(obj, contextWithoutS3Action, true));
+
+ verifyS3ActionPassedToAuthorizer(accessAuthorizer, obj, "GetObject");
+ }
+
+ @Test
+ public void testCheckAclsLeavesS3ActionUnsetWhenS3AuthThreadLocalNull() throws Exception {
+ final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue();
+ final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer);
+
+ final RequestContext contextWithoutS3Action = createTestRequestContext();
+ final OzoneObj obj = createTestOzoneObj();
+
+ assertTrue(omMetadataReader.checkAcls(obj, contextWithoutS3Action, true));
+
+ verifyS3ActionPassedToAuthorizer(accessAuthorizer, obj, null);
+ }
+
+ @Test
+ public void testCheckAclsAttachesSessionPolicyAndS3ActionFromThreadLocals() throws Exception {
+ setupStsTokenIdentifier();
+
+ OzoneManager.setS3Auth(S3Authentication.newBuilder()
+ .setAccessId(ACCESS_KEY_ID)
+ .setS3Action("PutObject")
+ .build());
+
+ final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue();
+ final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer);
+
+ final RequestContext baseContext = createTestRequestContext();
+ final OzoneObj obj = createTestOzoneObj();
+
+ assertTrue(omMetadataReader.checkAcls(obj, baseContext, true));
+
+ verifySessionPolicyAndS3ActionPassedToAuthorizer(accessAuthorizer, obj);
+ }
+
+ @Test
+ public void testListStatusUsesReadAclForStsS3Request() throws Exception {
setupStsS3Request();
final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue();
@@ -161,10 +212,9 @@ public void testListStatusUsesListAclForStsS3Request() throws Exception {
// For STS S3 requests, listStatus() performs these checks:
// 1. Volume READ (for volume access)
- // 2) Key LIST (for the specific prefix being listed) - we need LIST permission for STS in order to tell whether the
- // file should be listed only or downloadable (downloadable would be READ)
+ // 2. Key READ (for the specific prefix being listed)
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, KEY_PREFIX);
+ assertContainsKeyReadCheckWithName(checks, KEY_PREFIX);
}
@Test
@@ -186,7 +236,6 @@ public void testListStatusUsesReadAclForNonStsRequest() throws Exception {
assertContainsVolumeReadCheck(checks);
// We want to ensure the current behavior for non-STS requests remains the same
assertContainsKeyReadCheckWithName(checks);
- assertDoesNotContainKeyListCheck(checks);
}
@Test
@@ -209,7 +258,7 @@ public void testListStatusUsesListPrefixForAclWhenKeyNameEmptyAndListPrefixSet()
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, "userA/");
+ assertContainsKeyReadCheckWithName(checks, "userA/");
}
@Test
@@ -231,7 +280,7 @@ public void testListStatusUsesWildcardForAclWhenKeyNameAndListPrefixEmpty() thro
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, "*");
+ assertContainsKeyReadCheckWithName(checks, "*");
}
@Test
@@ -254,7 +303,7 @@ public void testListStatusUsesListPrefixForAclWhenKeyNameIsDescendantOfListPrefi
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, "user");
+ assertContainsKeyReadCheckWithName(checks, "user");
}
@Test
@@ -277,7 +326,7 @@ public void testListStatusUsesListPrefixForAclWhenKeyNameIsAncestorOfListPrefix(
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, "user/foo");
+ assertContainsKeyReadCheckWithName(checks, "user/foo");
}
@Test
@@ -301,7 +350,7 @@ public void testListStatusThrowsWhenStsKeyNameNotUnderListPrefix() throws Except
}
@Test
- public void testGetFileStatusUsesListAclForStsS3Request() throws Exception {
+ public void testGetFileStatusUsesReadAclForStsS3Request() throws Exception {
setupStsS3Request();
final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue();
@@ -314,8 +363,7 @@ public void testGetFileStatusUsesListAclForStsS3Request() throws Exception {
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
- assertContainsKeyListCheckWithName(checks, KEY_PREFIX);
- assertDoesNotContainKeyReadCheck(checks);
+ assertContainsKeyReadCheckWithName(checks, KEY_PREFIX);
}
@Test
@@ -333,7 +381,6 @@ public void testGetFileStatusUsesReadAclForNonStsS3Request() throws Exception {
final List checks = captureAclChecks(accessAuthorizer, 2);
assertContainsVolumeReadCheck(checks);
assertContainsKeyReadCheckWithName(checks);
- assertDoesNotContainKeyListCheck(checks);
}
@Test
@@ -350,7 +397,7 @@ public void testListKeysUsesPrefixCheckForStsS3Request() throws Exception {
List checks = captureAclChecks(accessAuthorizer, 4);
assertContainsBucketListCheck(checks);
- assertContainsKeyListCheckWithName(checks, "userA/");
+ assertContainsKeyReadCheckWithName(checks, "userA/");
// Reset to make case 2 assertions independent of case 1 captures.
reset(accessAuthorizer);
@@ -361,7 +408,7 @@ public void testListKeysUsesPrefixCheckForStsS3Request() throws Exception {
checks = captureAclChecks(accessAuthorizer, 4);
assertContainsBucketListCheck(checks);
- assertContainsKeyListCheckWithName(checks, "*");
+ assertContainsKeyReadCheckWithName(checks, "*");
}
private OmMetadataReader createMetadataReader(IAccessAuthorizer accessAuthorizer) throws IOException {
@@ -505,6 +552,27 @@ private void verifySessionPolicyPassedToAuthorizer(IAccessAuthorizer accessAutho
assertEquals(expectedSessionPolicy, captor.getValue().getSessionPolicy());
}
+ /**
+ * Verifies that the accessAuthorizer received a call to checkAccess with the expected S3 action.
+ * @param accessAuthorizer the mock authorizer to verify
+ * @param expectedObj the expected OzoneObj
+ * @param expectedS3Action the expected S3 action (may be null)
+ */
+ private void verifyS3ActionPassedToAuthorizer(IAccessAuthorizer accessAuthorizer, OzoneObj expectedObj,
+ String expectedS3Action) throws OMException {
+ final ArgumentCaptor captor = ArgumentCaptor.forClass(RequestContext.class);
+ verify(accessAuthorizer).checkAccess(eq(expectedObj), captor.capture());
+ assertEquals(expectedS3Action, captor.getValue().getS3Action());
+ }
+
+ private void verifySessionPolicyAndS3ActionPassedToAuthorizer(IAccessAuthorizer accessAuthorizer,
+ OzoneObj expectedObj) throws OMException {
+ final ArgumentCaptor captor = ArgumentCaptor.forClass(RequestContext.class);
+ verify(accessAuthorizer).checkAccess(eq(expectedObj), captor.capture());
+ assertEquals("session-policy-from-thread-local", captor.getValue().getSessionPolicy());
+ assertEquals("PutObject", captor.getValue().getS3Action());
+ }
+
private List captureAclChecks(IAccessAuthorizer accessAuthorizer, int expectedCheckCount)
throws OMException {
final ArgumentCaptor objCaptor = ArgumentCaptor.forClass(OzoneObj.class);
@@ -538,12 +606,12 @@ private void assertContainsBucketListCheck(List checks) {
"Expected a BUCKET LIST ACL check");
}
- private void assertContainsKeyListCheckWithName(List checks, String keyName) {
+ private void assertContainsKeyReadCheckWithName(List checks, String keyName) {
assertTrue(
checks.stream().anyMatch(
- check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == LIST &&
+ check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == READ &&
keyName.equals(check.getObj().getKeyName())),
- "Expected a KEY LIST ACL check for key '" + keyName + "'");
+ "Expected a KEY READ ACL check for key '" + keyName + "'");
}
private void assertContainsKeyReadCheckWithName(List checks) {
@@ -554,20 +622,6 @@ private void assertContainsKeyReadCheckWithName(List checks) {
"Expected a KEY READ ACL check for key '" + TestOMMetadataReader.KEY_PREFIX + "'");
}
- private void assertDoesNotContainKeyReadCheck(List checks) {
- assertFalse(
- checks.stream().anyMatch(
- check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == READ),
- "Did not expect a KEY READ ACL check");
- }
-
- private void assertDoesNotContainKeyListCheck(List checks) {
- assertFalse(
- checks.stream().anyMatch(
- check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == LIST),
- "Did not expect a KEY LIST ACL check");
- }
-
private static final class AclCheck {
private final OzoneObj obj;
private final RequestContext context;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
index 6d3621cc0098..4baef5ac9a04 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
@@ -210,6 +210,72 @@ public void testUserInfoWithSTSToken() throws IOException {
}
}
+ @Test
+ public void testPreExecuteOverwritesResolvedStsFields() throws Exception {
+ try (MockedStatic mockedRpcServer = mockStatic(Server.class)) {
+ mockedRpcServer.when(Server::getRemoteUser).thenReturn(userGroupInformation);
+ mockedRpcServer.when(Server::getRemoteIp).thenReturn(inetAddress);
+ mockedRpcServer.when(Server::getRemoteAddress).thenReturn(inetAddress.toString());
+
+ final String accessId = "ASIA12345";
+ final String signature = "Signature";
+ final String stringToSign = "StringToSign";
+ final String sessionToken = "SessionToken";
+ final String originalAccessKeyId = "AKIAORIGINAL";
+ final String roleArn = "arn:aws:iam::123456789012:role/test-role";
+ final String sessionPolicy = "test-session-policy";
+ final UUID secretKeyId = UUID.randomUUID();
+
+ final STSTokenIdentifier stsTokenIdentifier = mock(STSTokenIdentifier.class);
+ when(stsTokenIdentifier.getSessionPolicy()).thenReturn(sessionPolicy);
+ when(stsTokenIdentifier.getRoleArn()).thenReturn(roleArn);
+ when(stsTokenIdentifier.getOriginalAccessKeyId()).thenReturn(originalAccessKeyId);
+ when(stsTokenIdentifier.getTempAccessKeyId()).thenReturn(accessId);
+ when(stsTokenIdentifier.getSecretKeyId()).thenReturn(secretKeyId);
+
+ final S3Authentication s3Authentication = S3Authentication.newBuilder()
+ .setAccessId(accessId)
+ .setSignature(signature)
+ .setStringToSign(stringToSign)
+ .setSessionToken(sessionToken)
+ .setResolvedStsSessionPolicy("client-session-policy")
+ .setResolvedStsRoleArn("client-role")
+ .setResolvedStsOriginalAccessKeyId("client-original-access-key-id")
+ .setResolvedStsTempAccessKeyId("client-temp-access-key-id")
+ .setResolvedStsSecretKeyId("client-secret-key-id")
+ .build();
+
+ OzoneManager.setS3Auth(s3Authentication);
+ OzoneManager.setStsTokenIdentifier(stsTokenIdentifier);
+
+ try {
+ final String bucketName = UUID.randomUUID().toString();
+ final String volumeName = UUID.randomUUID().toString();
+ final BucketInfo.Builder bucketInfo =
+ newBucketInfoBuilder(bucketName, volumeName)
+ .setIsVersionEnabled(true)
+ .setStorageType(StorageTypeProto.DISK);
+
+ final OMRequest omRequest = newCreateBucketRequest(bucketInfo)
+ .setS3Authentication(s3Authentication)
+ .build();
+
+ final OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(omRequest);
+ final OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager);
+ final S3Authentication modifiedS3Auth = modifiedRequest.getS3Authentication();
+
+ assertEquals(sessionPolicy, modifiedS3Auth.getResolvedStsSessionPolicy());
+ assertEquals(roleArn, modifiedS3Auth.getResolvedStsRoleArn());
+ assertEquals(originalAccessKeyId, modifiedS3Auth.getResolvedStsOriginalAccessKeyId());
+ assertEquals(accessId, modifiedS3Auth.getResolvedStsTempAccessKeyId());
+ assertEquals(secretKeyId.toString(), modifiedS3Auth.getResolvedStsSecretKeyId());
+ } finally {
+ OzoneManager.setStsTokenIdentifier(null);
+ OzoneManager.setS3Auth(null);
+ }
+ }
+ }
+
@Test
public void testUserInfoWithSTSAccessKeyMissingSessionToken() {
final String accessId = "ASIA12345";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java
index c93df8a49009..8decf4fd316f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java
@@ -35,7 +35,10 @@
import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.ozone.test.TestClock;
@@ -374,4 +377,76 @@ public void testEnsureEssentialFieldsArePresentInTokenMissingSecretAccessKey() {
.isInstanceOf(SecretManager.InvalidToken.class)
.hasMessage("Invalid STS token - secretAccessKey is null/empty");
}
+
+ @Test
+ public void testEnsureResolvedStsFieldsInvariantsSuccess() throws Exception {
+ final String tokenString = tokenSecretManager.createSTSTokenString(
+ TEMP_ACCESS_KEY, ORIGINAL_ACCESS_KEY, ROLE_ARN, DURATION_SECONDS, SECRET_ACCESS_KEY, SESSION_POLICY, clock);
+
+ final S3Authentication s3Auth = S3Authentication.newBuilder()
+ .setSessionToken(tokenString)
+ .setResolvedStsSessionPolicy(SESSION_POLICY)
+ .setResolvedStsRoleArn(ROLE_ARN)
+ .setResolvedStsOriginalAccessKeyId(ORIGINAL_ACCESS_KEY)
+ .setResolvedStsTempAccessKeyId(TEMP_ACCESS_KEY)
+ .setResolvedStsSecretKeyId(secretKeyId.toString())
+ .build();
+
+ final OMRequest request = OMRequest.newBuilder()
+ .setCmdType(Type.CreateBucket)
+ .setClientId("client-id")
+ .setS3Authentication(s3Auth)
+ .build();
+
+ STSSecurityUtil.ensureResolvedStsFieldsInvariants(request);
+ }
+
+ @Test
+ public void testEnsureResolvedStsFieldsInvariantsMissingSessionToken() {
+ final S3Authentication s3Auth = S3Authentication.newBuilder()
+ .setResolvedStsSessionPolicy(SESSION_POLICY)
+ .build();
+
+ final OMRequest request = OMRequest.newBuilder()
+ .setCmdType(Type.CreateBucket)
+ .setClientId("client-id")
+ .setS3Authentication(s3Auth)
+ .build();
+
+ assertThatThrownBy(() -> STSSecurityUtil.ensureResolvedStsFieldsInvariants(request))
+ .isInstanceOf(OMException.class)
+ .hasMessageContaining("Resolved STS fields must be empty when sessionToken is not present");
+ }
+
+ @Test
+ public void testEnsureResolvedStsFieldsInvariantsMissingResolvedFields() throws Exception {
+ final String tokenString = tokenSecretManager.createSTSTokenString(
+ TEMP_ACCESS_KEY, ORIGINAL_ACCESS_KEY, ROLE_ARN, DURATION_SECONDS,
+ SECRET_ACCESS_KEY, SESSION_POLICY, clock);
+
+ final S3Authentication s3Auth = S3Authentication.newBuilder()
+ .setSessionToken(tokenString)
+ .build();
+
+ final OMRequest request = OMRequest.newBuilder()
+ .setCmdType(Type.CreateBucket)
+ .setClientId("client-id")
+ .setS3Authentication(s3Auth)
+ .build();
+
+ assertThatThrownBy(() -> STSSecurityUtil.ensureResolvedStsFieldsInvariants(request))
+ .isInstanceOf(OMException.class)
+ .hasMessageContaining("Resolved STS fields must be present when sessionToken is present");
+ }
+
+ @Test
+ public void testEnsureResolvedStsFieldsInvariantsNoS3Auth() throws Exception {
+ final OMRequest request = OMRequest.newBuilder()
+ .setCmdType(Type.CreateBucket)
+ .setClientId("client-id")
+ .build();
+
+ // Should not throw
+ STSSecurityUtil.ensureResolvedStsFieldsInvariants(request);
+ }
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 799af5d7fa95..8325017c7b5f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -100,7 +100,6 @@ public Response get(
S3RequestContext context = new S3RequestContext(this, S3GAction.GET_BUCKET);
long startNanos = context.getStartNanos();
- S3GAction s3GAction = context.getAction();
PerformanceStringBuilder perf = context.getPerf();
// Chain of responsibility: let each handler try to handle the request
@@ -128,7 +127,7 @@ public Response get(
try {
final String uploads = queryParams().get(QueryParams.UPLOADS);
if (uploads != null) {
- s3GAction = S3GAction.LIST_MULTIPART_UPLOAD;
+ context.setAction(S3GAction.LIST_MULTIPART_UPLOAD);
final String uploadIdMarker = queryParams().get(QueryParams.UPLOAD_ID_MARKER);
final String keyMarker = queryParams().get(QueryParams.KEY_MARKER);
return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads);
@@ -161,12 +160,12 @@ public Response get(
ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow);
} catch (OMException ex) {
- auditReadFailure(s3GAction, ex);
+ auditReadFailure(context.getAction(), ex);
getMetrics().updateGetBucketFailureStats(startNanos);
handleOMException(ex, bucketName, prefix);
} catch (Exception ex) {
getMetrics().updateGetBucketFailureStats(startNanos);
- auditReadFailure(s3GAction, ex);
+ auditReadFailure(context.getAction(), ex);
throw ex;
}
@@ -255,11 +254,11 @@ public Response get(
}
} catch (RuntimeException ex) {
getMetrics().updateGetBucketFailureStats(startNanos);
- auditReadFailure(s3GAction, ex);
+ auditReadFailure(context.getAction(), ex);
if (ex.getCause() instanceof OMException) {
final OMException omException = (OMException) ex.getCause();
if (omException.getResult() == ResultCodes.FILE_NOT_FOUND) {
- throw ex;
+ throw ex;
}
handleOMException(omException, bucketName, prefix);
} else {
@@ -289,7 +288,7 @@ public Response get(
getMetrics().incListKeyCount(keyCount);
perf.appendCount(keyCount);
perf.appendOpLatencyNanos(opLatencyNs);
- auditReadSuccess(s3GAction, perf);
+ auditReadSuccess(context.getAction(), perf);
response.setKeyCount(keyCount);
return Response.ok(response).build();
}
@@ -390,16 +389,16 @@ public Response listMultipartUploads(
@HEAD
public Response head(@PathParam(BUCKET) String bucketName)
throws OS3Exception, IOException {
- long startNanos = Time.monotonicNowNanos();
- S3GAction s3GAction = S3GAction.HEAD_BUCKET;
+ S3RequestContext context = new S3RequestContext(this, S3GAction.HEAD_BUCKET);
+ long startNanos = context.getStartNanos();
try {
OzoneBucket bucket = getBucket(bucketName);
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
- auditReadSuccess(s3GAction);
+ auditReadSuccess(context.getAction());
getMetrics().updateHeadBucketSuccessStats(startNanos);
return Response.ok().build();
} catch (Exception e) {
- auditReadFailure(s3GAction, e);
+ auditReadFailure(context.getAction(), e);
throw e;
}
}
@@ -438,7 +437,7 @@ public MultiDeleteResponse multiDelete(
@QueryParam(QueryParams.DELETE) String delete,
MultiDeleteRequest request
) throws OS3Exception, IOException {
- S3GAction s3GAction = S3GAction.MULTI_DELETE;
+ S3RequestContext context = new S3RequestContext(this, S3GAction.MULTI_DELETE);
OzoneBucket bucket = getBucket(bucketName);
MultiDeleteResponse result = new MultiDeleteResponse();
@@ -449,7 +448,7 @@ public MultiDeleteResponse multiDelete(
for (DeleteObject keyToDelete : request.getObjects()) {
deleteKeys.add(keyToDelete.getKey());
}
- long startNanos = Time.monotonicNowNanos();
+ long startNanos = context.getStartNanos();
try {
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true);
@@ -477,7 +476,7 @@ public MultiDeleteResponse multiDelete(
}
}
- AuditMessage.Builder message = auditMessageFor(s3GAction);
+ AuditMessage.Builder message = auditMessageFor(context.getAction());
message.getParams().put("failedDeletes", deleteKeys.toString());
if (!result.getErrors().isEmpty()) {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
index f3b8b6e60e60..354158a892a5 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java
@@ -46,8 +46,12 @@ public CopyPartResult() {
}
public CopyPartResult(String eTag) {
+ this(eTag, Instant.now());
+ }
+
+ public CopyPartResult(String eTag, Instant lastModified) {
this.eTag = eTag;
- this.lastModified = Instant.now();
+ this.lastModified = lastModified;
}
public Instant getLastModified() {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyResult.java
new file mode 100644
index 000000000000..8b67ddbcbaaa
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyResult.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+/**
+ * Result of a copy operation.
+ */
+public class CopyResult {
+ private final String eTag;
+ private final long size;
+ private final long modificationTime;
+
+ public CopyResult(String eTag, long size, long modificationTime) {
+ this.eTag = eTag;
+ this.size = size;
+ this.modificationTime = modificationTime;
+ }
+
+ public String getETag() {
+ return eTag;
+ }
+
+ public long getSize() {
+ return size;
+ }
+
+ public long getModificationTime() {
+ return modificationTime;
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 20ad21e23f11..5b31ffbc7716 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -87,6 +87,7 @@
import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
import org.apache.hadoop.ozone.audit.AuditLoggerType;
import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.audit.S3GAction;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
@@ -106,11 +107,13 @@
import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics;
import org.apache.hadoop.ozone.s3.signature.SignatureInfo;
import org.apache.hadoop.ozone.s3.util.AuditUtils;
+import org.apache.hadoop.ozone.s3.util.S3GActionIamMapper;
import org.apache.hadoop.ozone.s3.util.S3Utils;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
+import org.apache.ratis.util.function.CheckedSupplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -252,6 +255,39 @@ protected void init() {
// hook method
}
+ /**
+ * Sets the IAM S3 action on thread-local {@link S3Auth} for fine-grained STS authorization.
+ * Called when the handler resolves the {@link S3GAction}.
+ */
+ protected void applyS3Action(S3GAction action) {
+ if (s3Auth != null) {
+ s3Auth.setS3Action(S3GActionIamMapper.toS3ActionString(action));
+ }
+ }
+
+ /**
+ * Temporarily overrides the S3 action string set on {@link S3Auth} for authorization.
+ *
+ * <p>This does not change S3G auditing (which is based on {@link S3GAction}).
+ * The action string is the IAM-style S3 action name without the {@code s3:} prefix (for example
+ * {@code GetObject}, {@code PutObject}, {@code GetObjectTagging}).
+ * This is used for special-case APIs like CopyObject that don't have a 1-to-1 S3 action mapping, but
+ * instead require GetObject on the source file and PutObject on the destination file.
+ */
+ protected T runWithS3ActionString(String s3Action, CheckedSupplier checkedSupplier)
+ throws E {
+ if (s3Auth == null) {
+ return checkedSupplier.get();
+ }
+ final String originalS3Action = s3Auth.getS3Action();
+ s3Auth.setS3Action(s3Action);
+ try {
+ return checkedSupplier.get();
+ } finally {
+ s3Auth.setS3Action(originalS3Action);
+ }
+ }
+
protected OzoneBucket getBucket(String bucketName)
throws OS3Exception, IOException {
OzoneBucket bucket;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d97c514f9ae6..bd04ceaccda8 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -539,8 +539,8 @@ static void addTagCountIfAny(
public Response head(
@PathParam(BUCKET) String bucketName,
@PathParam(PATH) String keyPath) throws IOException, OS3Exception {
- long startNanos = Time.monotonicNowNanos();
- S3GAction s3GAction = S3GAction.HEAD_KEY;
+ ObjectRequestContext context = new ObjectRequestContext(S3GAction.HEAD_KEY, bucketName);
+ long startNanos = context.getStartNanos();
OzoneKey key;
try {
@@ -553,7 +553,7 @@ public Response head(
isFile(keyPath, key);
// TODO: return the specified range bytes of this object.
} catch (OMException ex) {
- auditReadFailure(s3GAction, ex);
+ auditReadFailure(context.getAction(), ex);
getMetrics().updateHeadKeyFailureStats(startNanos);
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
// Just return 404 with no content
@@ -568,7 +568,7 @@ public Response head(
throw ex;
}
} catch (Exception ex) {
- auditReadFailure(s3GAction, ex);
+ auditReadFailure(context.getAction(), ex);
throw ex;
}
@@ -596,7 +596,7 @@ public Response head(
addLastModifiedDate(response, key);
addCustomMetadataHeaders(response, key);
getMetrics().updateHeadKeySuccessStats(startNanos);
- auditReadSuccess(s3GAction);
+ auditReadSuccess(context.getAction());
return response.build();
}
@@ -691,8 +691,8 @@ public Response initializeMultipartUpload(
@PathParam(BUCKET) String bucket,
@PathParam(PATH) String key
) throws IOException, OS3Exception {
- long startNanos = Time.monotonicNowNanos();
- S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD;
+ ObjectRequestContext context = new ObjectRequestContext(S3GAction.INIT_MULTIPART_UPLOAD, bucket);
+ long startNanos = context.getStartNanos();
try {
OzoneBucket ozoneBucket = getBucket(bucket);
@@ -715,12 +715,12 @@ public Response initializeMultipartUpload(
multipartUploadInitiateResponse.setKey(key);
multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
- auditWriteSuccess(s3GAction);
+ auditWriteSuccess(context.getAction());
getMetrics().updateInitMultipartUploadSuccessStats(startNanos);
return Response.status(Status.OK).entity(
multipartUploadInitiateResponse).build();
} catch (OMException ex) {
- auditWriteFailure(s3GAction, ex);
+ auditWriteFailure(context.getAction(), ex);
getMetrics().updateInitMultipartUploadFailureStats(startNanos);
if (isExpiredToken(ex)) {
throw newError(S3ErrorTable.EXPIRED_TOKEN, key, ex);
@@ -730,7 +730,7 @@ public Response initializeMultipartUpload(
}
throw ex;
} catch (Exception ex) {
- auditWriteFailure(s3GAction, ex);
+ auditWriteFailure(context.getAction(), ex);
getMetrics().updateInitMultipartUploadFailureStats(startNanos);
throw ex;
}
@@ -746,9 +746,9 @@ public Response completeMultipartUpload(
@PathParam(PATH) String key,
CompleteMultipartUploadRequest multipartUploadRequest
) throws IOException, OS3Exception {
+ ObjectRequestContext context = new ObjectRequestContext(S3GAction.COMPLETE_MULTIPART_UPLOAD, bucket);
final String uploadID = queryParams().get(QueryParams.UPLOAD_ID, "");
- long startNanos = Time.monotonicNowNanos();
- S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
+ long startNanos = context.getStartNanos();
OzoneVolume volume = getVolume();
// Using LinkedHashMap to preserve ordering of parts list.
Map partsMap = new LinkedHashMap<>();
@@ -776,12 +776,12 @@ public Response completeMultipartUpload(
wrapInQuotes(omMultipartUploadCompleteInfo.getHash()));
// Location also setting as bucket name.
completeMultipartUploadResponse.setLocation(bucket);
- auditWriteSuccess(s3GAction);
+ auditWriteSuccess(context.getAction());
getMetrics().updateCompleteMultipartUploadSuccessStats(startNanos);
return Response.status(Status.OK).entity(completeMultipartUploadResponse)
.build();
} catch (OMException ex) {
- auditWriteFailure(s3GAction, ex);
+ auditWriteFailure(context.getAction(), ex);
getMetrics().updateCompleteMultipartUploadFailureStats(startNanos);
if (ex.getResult() == ResultCodes.INVALID_PART) {
throw newError(S3ErrorTable.INVALID_PART, key, ex);
@@ -814,7 +814,7 @@ public Response completeMultipartUpload(
}
throw ex;
} catch (Exception ex) {
- auditWriteFailure(s3GAction, ex);
+ auditWriteFailure(context.getAction(), ex);
throw ex;
}
}
@@ -854,8 +854,8 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
uploadID, getChunkSize(), multiDigestInputStream, perf, getHeaders());
}
// OmMultipartCommitUploadPartInfo can only be gotten after the
- // OzoneOutputStream is closed, so we need to save the OzoneOutputStream
- final OzoneOutputStream outputStream;
+ // OzoneOutputStream is closed, so we need to get and save the commit info.
+ final OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo;
long metadataLatencyNs;
if (copyHeader != null) {
Pair result = parseSourceHeader(copyHeader);
@@ -867,8 +867,8 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
ozoneBucket.getOwner());
}
- OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails(
- volume.getName(), sourceBucket, sourceKey);
+ final OzoneKeyDetails sourceKeyDetails = runWithS3ActionString(
+ "GetObject", () -> getClientProtocol().getKeyDetails(volume.getName(), sourceBucket, sourceKey));
String range =
getHeaders().getHeaderString(COPY_SOURCE_HEADER_RANGE);
RangeHeader rangeHeader = null;
@@ -893,7 +893,8 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
}
try (OzoneInputStream sourceObject = sourceKeyDetails.getContent()) {
- long copyLength;
+ final long[] copyLengthHolder = new long[1];
+ final long[] metadataLatencyHolder = new long[1];
if (range != null) {
final long skipped =
sourceObject.skip(rangeHeader.getStartOffset());
@@ -903,52 +904,55 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
+ rangeHeader.getStartOffset() + " actual: " + skipped);
}
}
- try (OzoneOutputStream ozoneOutputStream = getClientProtocol()
- .createMultipartKey(volume.getName(), bucketName, key, length,
- partNumber, uploadID)) {
- metadataLatencyNs =
- getMetrics().updateCopyKeyMetadataStats(startNanos);
- copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, length,
- new byte[getIOBufferSize(length)]);
- ozoneOutputStream.getMetadata()
- .putAll(sourceKeyDetails.getMetadata());
- String raw = ozoneOutputStream.getMetadata().get(OzoneConsts.ETAG);
- if (raw != null) {
- ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, stripQuotes(raw));
+ final long finalLength = length;
+ final long bytesToCopy = length;
+ omMultipartCommitUploadPartInfo = runWithS3ActionString("PutObject", () -> {
+ final OzoneOutputStream ozoneOutputStream = getClientProtocol().createMultipartKey(
+ volume.getName(), bucketName, key, finalLength, partNumber, uploadID);
+ try (OzoneOutputStream ignored = ozoneOutputStream) {
+ metadataLatencyHolder[0] = getMetrics().updateCopyKeyMetadataStats(startNanos);
+ copyLengthHolder[0] = IOUtils.copyLarge(
+ sourceObject, ozoneOutputStream, 0, bytesToCopy, new byte[getIOBufferSize(bytesToCopy)]);
+ ozoneOutputStream.getMetadata()
+ .putAll(sourceKeyDetails.getMetadata());
+ final String raw = ozoneOutputStream.getMetadata().get(OzoneConsts.ETAG);
+ if (raw != null) {
+ ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, stripQuotes(raw));
+ }
}
- outputStream = ozoneOutputStream;
- }
- getMetrics().incCopyObjectSuccessLength(copyLength);
- perf.appendSizeBytes(copyLength);
+ return ozoneOutputStream.getCommitUploadPartInfo();
+ });
+ metadataLatencyNs = metadataLatencyHolder[0];
+ getMetrics().incCopyObjectSuccessLength(copyLengthHolder[0]);
+ perf.appendSizeBytes(copyLengthHolder[0]);
}
} else {
- long putLength;
- try (OzoneOutputStream ozoneOutputStream = getClientProtocol()
+ final long putLength;
+ final OzoneOutputStream ozoneOutputStream = getClientProtocol()
.createMultipartKey(volume.getName(), bucketName, key, length,
- partNumber, uploadID)) {
+ partNumber, uploadID);
+ try (OzoneOutputStream ignored = ozoneOutputStream) {
metadataLatencyNs =
getMetrics().updatePutKeyMetadataStats(startNanos);
putLength = IOUtils.copyLarge(multiDigestInputStream, ozoneOutputStream, 0, length,
new byte[getIOBufferSize(length)]);
- byte[] digest = multiDigestInputStream.getMessageDigest(OzoneConsts.MD5_HASH).digest();
- String md5Hash = DatatypeConverter.printHexBinary(digest).toLowerCase();
- String clientContentMD5 = getHeaders().getHeaderString(S3Consts.CHECKSUM_HEADER);
+ final byte[] digest = multiDigestInputStream.getMessageDigest(OzoneConsts.MD5_HASH).digest();
+ final String md5Hash = DatatypeConverter.printHexBinary(digest).toLowerCase();
+ final String clientContentMD5 = getHeaders().getHeaderString(S3Consts.CHECKSUM_HEADER);
if (clientContentMD5 != null) {
- CheckedRunnable checkContentMD5Hook = () -> {
+ final CheckedRunnable checkContentMD5Hook = () -> {
S3Utils.validateContentMD5(clientContentMD5, md5Hash, key);
};
ozoneOutputStream.getKeyOutputStream().setPreCommits(Collections.singletonList(checkContentMD5Hook));
}
ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, md5Hash);
- outputStream = ozoneOutputStream;
}
+ omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
getMetrics().incPutKeySuccessLength(putLength);
perf.appendSizeBytes(putLength);
}
perf.appendMetaLatencyNanos(metadataLatencyNs);
- OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
- outputStream.getCommitUploadPartInfo();
String eTag = omMultipartCommitUploadPartInfo.getETag();
// If the OmMultipartCommitUploadPartInfo does not contain eTag,
// fall back to MPU part name for compatibility in case the (old) OM
@@ -960,7 +964,10 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
if (copyHeader != null) {
getMetrics().updateCopyObjectSuccessStats(startNanos);
- return Response.ok(new CopyPartResult(eTag)).build();
+ final Instant lastModified = omMultipartCommitUploadPartInfo.getModificationTime().isPresent()
+ ? Instant.ofEpochMilli(omMultipartCommitUploadPartInfo.getModificationTime().getAsLong())
+ : Instant.now();
+ return Response.ok(new CopyPartResult(eTag, lastModified)).build();
} else {
getMetrics().updateCreateMultipartKeySuccessStats(startNanos);
return Response.ok().header(HttpHeaders.ETAG, eTag).build();
@@ -985,6 +992,24 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
throw os3Exception;
}
throw ex;
+ } catch (IOException ex) {
+ // Ensure we handle permission failures - these can surface as IOException wrapping OMException.
+ if (copyHeader != null) {
+ getMetrics().updateCopyObjectFailureStats(startNanos);
+ } else {
+ getMetrics().updateCreateMultipartKeyFailureStats(startNanos);
+ }
+ final OMException omEx = findOMException(ex);
+ if (omEx != null) {
+ if (omEx.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
+ throw newError(NO_SUCH_UPLOAD, uploadID, omEx);
+ } else if (isExpiredToken(omEx)) {
+ throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName + "/" + key, omEx);
+ } else if (isAccessDenied(omEx)) {
+ throw newError(S3ErrorTable.ACCESS_DENIED, bucketName + "/" + key, omEx);
+ }
+ }
+ throw ex;
} finally {
// Reset the thread-local message digest instance in case of exception
// and MessageDigest#digest is never called
@@ -994,8 +1019,19 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket,
}
}
+ private static OMException findOMException(Throwable t) {
+ Throwable currentException = t;
+ while (currentException != null) {
+ if (currentException instanceof OMException) {
+ return (OMException) currentException;
+ }
+ currentException = currentException.getCause();
+ }
+ return null;
+ }
+
@SuppressWarnings("checkstyle:ParameterNumber")
- void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen,
+ CopyResult copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen,
String destKey, String destBucket,
ReplicationConfig replication,
Map metadata,
@@ -1003,14 +1039,20 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen,
Map tags)
throws IOException {
long copyLength;
+ final String eTag;
+ final long modificationTime;
if (isDatastreamEnabled() && !(replication != null &&
replication.getReplicationType() == EC) &&
srcKeyLen > getDatastreamMinLength()) {
perf.appendStreamMode();
- copyLength = ObjectEndpointStreaming
+ final CopyResult copyResult = ObjectEndpointStreaming
.copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen,
getChunkSize(), replication, metadata, src, perf, startNanos, tags);
+ eTag = copyResult.getETag();
+ copyLength = copyResult.getSize();
+ modificationTime = copyResult.getModificationTime();
} else {
+ Map destMetadata;
try (OzoneOutputStream dest = getClientProtocol()
.createKey(volume.getName(), destBucket, destKey, srcKeyLen,
replication, metadata, tags)) {
@@ -1018,12 +1060,16 @@ srcKeyLen > getDatastreamMinLength()) {
getMetrics().updateCopyKeyMetadataStats(startNanos);
perf.appendMetaLatencyNanos(metadataLatencyNs);
copyLength = IOUtils.copyLarge(src, dest, 0, srcKeyLen, new byte[getIOBufferSize(srcKeyLen)]);
- String md5Hash = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase();
- dest.getMetadata().put(OzoneConsts.ETAG, md5Hash);
+ eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase();
+ destMetadata = dest.getMetadata();
+ destMetadata.put(OzoneConsts.ETAG, eTag);
}
+
+ modificationTime = S3Utils.getModificationTimeOrDefault(destMetadata, Time.now());
}
getMetrics().incCopyObjectSuccessLength(copyLength);
perf.appendSizeBytes(copyLength);
+ return new CopyResult(eTag, copyLength, modificationTime);
}
private CopyObjectResponse copyObject(OzoneVolume volume,
@@ -1039,7 +1085,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume,
String sourceBucket = result.getLeft();
String sourceKey = result.getRight();
- DigestInputStream sourceDigestInputStream = null;
+ final MessageDigest md5Digest = getMD5DigestInstance();
if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) {
String sourceBucketOwner = volume.getBucket(sourceBucket).getOwner();
@@ -1047,8 +1093,8 @@ private CopyObjectResponse copyObject(OzoneVolume volume,
S3Owner.verifyBucketOwnerConditionOnCopyOperation(getHeaders(), sourceBucket, sourceBucketOwner, null, null);
}
try {
- OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails(
- volume.getName(), sourceBucket, sourceKey);
+ final OzoneKeyDetails sourceKeyDetails = runWithS3ActionString(
+ "GetObject", () -> getClientProtocol().getKeyDetails(volume.getName(), sourceBucket, sourceKey));
// Checking whether we trying to copying to it self.
if (sourceBucket.equals(destBucket) && sourceKey
.equals(destkey)) {
@@ -1110,22 +1156,22 @@ private CopyObjectResponse copyObject(OzoneVolume volume,
throw ex;
}
- try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(),
- sourceBucket, sourceKey)) {
+ try (OzoneInputStream src = runWithS3ActionString(
+ "GetObject", () -> getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey));
+ DigestInputStream sourceDigestInputStream = new DigestInputStream(src, md5Digest)) {
getMetrics().updateCopyKeyMetadataStats(startNanos);
- sourceDigestInputStream = new DigestInputStream(src, getMD5DigestInstance());
- copy(volume, sourceDigestInputStream, sourceKeyLen, destkey, destBucket, replicationConfig,
- customMetadata, perf, startNanos, tags);
- }
-
- final OzoneKeyDetails destKeyDetails = getClientProtocol().getKeyDetails(
- volume.getName(), destBucket, destkey);
+      final CopyResult copyResult = runWithS3ActionString("PutObject", () -> {
+        return copy(volume, sourceDigestInputStream, sourceKeyLen, destkey,
+            destBucket, replicationConfig, customMetadata, perf, startNanos,
+            tags);
+      });
- getMetrics().updateCopyObjectSuccessStats(startNanos);
- CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
- copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(OzoneConsts.ETAG)));
- copyObjectResponse.setLastModified(destKeyDetails.getModificationTime());
- return copyObjectResponse;
+ getMetrics().updateCopyObjectSuccessStats(startNanos);
+ CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
+ copyObjectResponse.setETag(wrapInQuotes(copyResult.getETag()));
+ copyObjectResponse.setLastModified(Instant.ofEpochMilli(copyResult.getModificationTime()));
+ return copyObjectResponse;
+ }
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex);
@@ -1141,9 +1187,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume,
} finally {
// Reset the thread-local message digest instance in case of exception
// and MessageDigest#digest is never called
- if (sourceDigestInputStream != null) {
- sourceDigestInputStream.getMessageDigest().reset();
- }
+ md5Digest.reset();
}
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
index 7012734b1611..f07ea9f580fc 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
@@ -158,7 +158,7 @@ public static Pair putKeyWithStream(
}
@SuppressWarnings("checkstyle:ParameterNumber")
- public static long copyKeyWithStream(
+ public static CopyResult copyKeyWithStream(
OzoneBucket bucket,
String keyPath,
long length,
@@ -169,17 +169,22 @@ public static long copyKeyWithStream(
Map tags)
throws IOException {
long writeLen;
+ String eTag;
+ Map metadata;
try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath,
length, replicationConfig, keyMetadata, tags)) {
long metadataLatencyNs =
METRICS.updateCopyKeyMetadataStats(startNanos);
writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length);
- String eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest())
+ eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest())
.toLowerCase();
perf.appendMetaLatencyNanos(metadataLatencyNs);
- ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag);
+ metadata = streamOutput.getMetadata();
+ metadata.put(OzoneConsts.ETAG, eTag);
}
- return writeLen;
+
+ final long modificationTime = S3Utils.getModificationTimeOrDefault(metadata, Time.now());
+ return new CopyResult(eTag, writeLen, modificationTime);
}
private static long writeToStreamOutput(OzoneDataStreamOutput streamOutput,
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java
index 9e638a112a76..8fdb80d9a488 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java
@@ -26,7 +26,6 @@
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,7 +47,8 @@ public class RootEndpoint extends EndpointBase {
@GET
public Response get()
throws OS3Exception, IOException {
- long startNanos = Time.monotonicNowNanos();
+ S3RequestContext context = new S3RequestContext(this, S3GAction.LIST_S3_BUCKETS);
+ long startNanos = context.getStartNanos();
boolean auditSuccess = true;
try {
ListBucketResponse response = new ListBucketResponse();
@@ -73,11 +73,11 @@ public Response get()
return Response.ok(response).build();
} catch (Exception ex) {
auditSuccess = false;
- auditReadFailure(S3GAction.LIST_S3_BUCKETS, ex);
+ auditReadFailure(context.getAction(), ex);
throw ex;
} finally {
if (auditSuccess) {
- auditReadSuccess(S3GAction.LIST_S3_BUCKETS);
+ auditReadSuccess(context.getAction());
}
}
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java
index 4130feaf6fdb..ebcb773cef45 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java
@@ -36,6 +36,7 @@ class S3RequestContext {
this.startNanos = Time.monotonicNowNanos();
this.perf = new PerformanceStringBuilder();
this.action = action;
+ endpoint.applyS3Action(action);
}
long getStartNanos() {
@@ -59,6 +60,7 @@ S3GAction getAction() {
void setAction(S3GAction action) {
this.action = action;
+ endpoint.applyS3Action(action);
}
/**
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3GActionIamMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3GActionIamMapper.java
new file mode 100644
index 000000000000..9953ebe2020b
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3GActionIamMapper.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.util;
+
+import jakarta.annotation.Nullable;
+import org.apache.hadoop.ozone.audit.S3GAction;
+
+/**
+ * Maps S3 Gateway operations to AWS IAM S3 action names. Values align with
+ * {@code org.apache.hadoop.ozone.security.acl.iam.IamSessionPolicyResolver} so STS session
+ * policies and Ranger policy conditions use the same vocabulary.
+ */
+public final class S3GActionIamMapper {
+
+ private S3GActionIamMapper() {
+ }
+
+ /**
+ * @return S3 action string, or null if not applicable to IAM S3
+ */
+ public static @Nullable String toS3ActionString(@Nullable S3GAction action) {
+ if (action == null) {
+ return null;
+ }
+ switch (action) {
+ case GET_BUCKET:
+ case HEAD_BUCKET:
+ return "ListBucket";
+ case CREATE_BUCKET:
+ return "CreateBucket";
+ case DELETE_BUCKET:
+ return "DeleteBucket";
+ case GET_ACL:
+ return "GetBucketAcl";
+ case PUT_ACL:
+ return "PutBucketAcl";
+ case LIST_MULTIPART_UPLOAD:
+ return "ListBucketMultipartUploads";
+ case MULTI_DELETE:
+ case DELETE_KEY:
+ return "DeleteObject";
+ case LIST_S3_BUCKETS:
+ return "ListAllMyBuckets";
+ case CREATE_MULTIPART_KEY:
+ case CREATE_KEY:
+ case INIT_MULTIPART_UPLOAD:
+ case COMPLETE_MULTIPART_UPLOAD:
+ case CREATE_DIRECTORY:
+ return "PutObject";
+ case LIST_PARTS:
+ return "ListMultipartUploadParts";
+ case GET_KEY:
+ case HEAD_KEY:
+ return "GetObject";
+ case ABORT_MULTIPART_UPLOAD:
+ return "AbortMultipartUpload";
+ case GET_OBJECT_TAGGING:
+ return "GetObjectTagging";
+ case PUT_OBJECT_TAGGING:
+ return "PutObjectTagging";
+ case DELETE_OBJECT_TAGGING:
+ return "DeleteObjectTagging";
+ case PUT_OBJECT_ACL:
+ return "PutObjectAcl";
+ case COPY_OBJECT:
+ case CREATE_MULTIPART_KEY_BY_COPY:
+ // CopyObject / UploadPartCopy require distinct source (GetObject) and destination (PutObject)
+ // authorization. The endpoint code explicitly sets the IAM action string for each phase.
+ return null;
+ case GENERATE_SECRET:
+ case REVOKE_SECRET:
+ case ASSUME_ROLE:
+ default:
+ return null;
+ }
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
index 39af21bb2758..46fac1614e52 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
@@ -34,6 +34,7 @@
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.Base64;
+import java.util.Map;
import java.util.Objects;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.HttpHeaders;
@@ -43,6 +44,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
@@ -248,4 +250,25 @@ public static void validateContentMD5(String clientMD5, String serverMD5, String
}
}
+ /**
+ * Extracts the modification time from the metadata map or returns the default time.
+ *
+ * @param metadata the metadata map
+ * @param defaultTime the default time to return if not found or malformed
+ * @return the modification time
+ */
+ public static long getModificationTimeOrDefault(Map metadata, long defaultTime) {
+ if (metadata != null) {
+ String modificationTimeStr = metadata.get(OzoneConsts.MODIFICATION_TIME);
+ if (modificationTimeStr != null) {
+ try {
+ return Long.parseLong(modificationTimeStr);
+ } catch (NumberFormatException ignore) {
+ // Fall back to defaultTime
+ }
+ }
+ }
+ return defaultTime;
+ }
+
}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCopyActionsAudit.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCopyActionsAudit.java
new file mode 100644
index 000000000000..380a7780e9e6
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCopyActionsAudit.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.X_AMZ_CONTENT_SHA256;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayInputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import javax.ws.rs.core.HttpHeaders;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
+import org.apache.hadoop.ozone.audit.S3GAction;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientStub;
+import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint.ObjectRequestContext;
+import org.apache.hadoop.ozone.s3.util.S3Consts;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Verifies the audit logging action for copy operations even when the S3 IAM action strings are overridden internally.
+ * For example, a CopyObject request must be audited as S3GAction.COPY_OBJECT, even though internally the IAM actions
+ * checked are GetObject and PutObject.
+ */
+public class TestCopyActionsAudit {
+
+ @Test
+ public void testCopyObjectAuditActionRemainsCopyObject() throws Exception {
+ final String bucketName = OzoneConsts.S3_BUCKET;
+ final String srcKey = "src.txt";
+ final String destKey = "dest.txt";
+
+ final OzoneClient client = new OzoneClientStub();
+ client.getObjectStore().createS3Bucket(bucketName);
+ final OzoneBucket bucket = client.getObjectStore().getS3Bucket(bucketName);
+
+ try (OutputStream out = bucket.createKey(
+ srcKey, 3, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+ new HashMap<>())) {
+ out.write("src".getBytes(UTF_8));
+ }
+
+ final HttpHeaders headers = mock(HttpHeaders.class);
+ when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("STANDARD");
+ when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature");
+ when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(bucketName + "/" + srcKey);
+ when(headers.getHeaderString(HttpHeaders.CONTENT_LENGTH)).thenReturn("0");
+
+ final ObjectEndpoint endpoint = newEndpoint(client, headers);
+ final AuditingObjectOperationHandler auditing = spy(new AuditingObjectOperationHandler(endpoint));
+
+ final ObjectRequestContext requestContext = endpoint.new ObjectRequestContext(S3GAction.CREATE_KEY, bucketName);
+
+ auditing.handlePutRequest(requestContext, destKey, new ByteArrayInputStream(new byte[0]));
+
+ verify(auditing).auditWriteSuccess(eq(S3GAction.COPY_OBJECT), any(PerformanceStringBuilder.class));
+ }
+
+ @Test
+ public void testUploadPartCopyAuditActionRemainsCreateMultipartKeyByCopy() throws Exception {
+ final String bucketName = OzoneConsts.S3_BUCKET;
+ final String srcKey = "src-part.txt";
+ final String destKey = "dest-mpu.txt";
+
+ final OzoneClient client = new OzoneClientStub();
+ client.getObjectStore().createS3Bucket(bucketName);
+ final OzoneBucket bucket = client.getObjectStore().getS3Bucket(bucketName);
+
+ try (OutputStream out = bucket.createKey(
+ srcKey, 4, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+ new HashMap<>())) {
+ out.write("part".getBytes(UTF_8));
+ }
+
+ final HttpHeaders headers = mock(HttpHeaders.class);
+ when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("STANDARD");
+ when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature");
+ when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(bucketName + "/" + srcKey);
+ when(headers.getHeaderString(HttpHeaders.CONTENT_LENGTH)).thenReturn("0");
+
+ final ObjectEndpoint endpoint = newEndpoint(client, headers);
+
+ final String uploadId = EndpointTestUtils.initiateMultipartUpload(endpoint, bucketName, destKey);
+ assertNotNull(uploadId);
+
+ endpoint.queryParamsForTest().set(S3Consts.QueryParams.UPLOAD_ID, uploadId);
+ endpoint.queryParamsForTest().setInt(S3Consts.QueryParams.PART_NUMBER, 1);
+
+ final AuditingObjectOperationHandler auditing = spy(new AuditingObjectOperationHandler(endpoint));
+ final ObjectRequestContext requestContext = endpoint.new ObjectRequestContext(S3GAction.CREATE_KEY, bucketName);
+
+ auditing.handlePutRequest(requestContext, destKey, new ByteArrayInputStream(new byte[0]));
+
+ verify(auditing).auditWriteSuccess(eq(S3GAction.CREATE_MULTIPART_KEY_BY_COPY), any(PerformanceStringBuilder.class));
+ }
+
+ private static ObjectEndpoint newEndpoint(OzoneClient client, HttpHeaders headers) {
+ return EndpointBuilder.newObjectEndpointBuilder()
+ .setClient(client)
+ .setHeaders(headers)
+ .build();
+ }
+}
+
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3GActionIamMapper.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3GActionIamMapper.java
new file mode 100644
index 000000000000..c7ae9e4e924c
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3GActionIamMapper.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import org.apache.hadoop.ozone.audit.S3GAction;
+import org.junit.jupiter.api.Test;
+
+/** Unit tests for {@link S3GActionIamMapper}. */
+public class TestS3GActionIamMapper {
+
+  /** Bucket-, object- and service-level gateway actions map to their IAM action names. */
+  @Test
+  public void mapsCoreActions() {
+    assertEquals("ListBucket", S3GActionIamMapper.toS3ActionString(S3GAction.GET_BUCKET));
+    assertEquals("ListBucket", S3GActionIamMapper.toS3ActionString(S3GAction.HEAD_BUCKET));
+    assertEquals("CreateBucket", S3GActionIamMapper.toS3ActionString(S3GAction.CREATE_BUCKET));
+    assertEquals("DeleteBucket", S3GActionIamMapper.toS3ActionString(S3GAction.DELETE_BUCKET));
+    assertEquals("GetBucketAcl", S3GActionIamMapper.toS3ActionString(S3GAction.GET_ACL));
+    assertEquals("PutBucketAcl", S3GActionIamMapper.toS3ActionString(S3GAction.PUT_ACL));
+    assertEquals("ListBucketMultipartUploads", S3GActionIamMapper.toS3ActionString(S3GAction.LIST_MULTIPART_UPLOAD));
+    assertEquals("DeleteObject", S3GActionIamMapper.toS3ActionString(S3GAction.MULTI_DELETE));
+    assertEquals("DeleteObject", S3GActionIamMapper.toS3ActionString(S3GAction.DELETE_KEY));
+    assertEquals("ListAllMyBuckets", S3GActionIamMapper.toS3ActionString(S3GAction.LIST_S3_BUCKETS));
+    assertEquals("PutObject", S3GActionIamMapper.toS3ActionString(S3GAction.CREATE_MULTIPART_KEY));
+    assertEquals("PutObject", S3GActionIamMapper.toS3ActionString(S3GAction.CREATE_KEY));
+    assertEquals("PutObject", S3GActionIamMapper.toS3ActionString(S3GAction.INIT_MULTIPART_UPLOAD));
+    assertEquals("PutObject", S3GActionIamMapper.toS3ActionString(S3GAction.COMPLETE_MULTIPART_UPLOAD));
+    assertEquals("PutObject", S3GActionIamMapper.toS3ActionString(S3GAction.CREATE_DIRECTORY));
+    assertEquals("ListMultipartUploadParts", S3GActionIamMapper.toS3ActionString(S3GAction.LIST_PARTS));
+    assertEquals("GetObject", S3GActionIamMapper.toS3ActionString(S3GAction.GET_KEY));
+    assertEquals("GetObject", S3GActionIamMapper.toS3ActionString(S3GAction.HEAD_KEY));
+    assertEquals("AbortMultipartUpload", S3GActionIamMapper.toS3ActionString(S3GAction.ABORT_MULTIPART_UPLOAD));
+    assertEquals("GetObjectTagging", S3GActionIamMapper.toS3ActionString(S3GAction.GET_OBJECT_TAGGING));
+    assertEquals("PutObjectTagging", S3GActionIamMapper.toS3ActionString(S3GAction.PUT_OBJECT_TAGGING));
+    assertEquals("DeleteObjectTagging", S3GActionIamMapper.toS3ActionString(S3GAction.DELETE_OBJECT_TAGGING));
+    assertEquals("PutObjectAcl", S3GActionIamMapper.toS3ActionString(S3GAction.PUT_OBJECT_ACL));
+  }
+
+  /** Copy operations have no single IAM equivalent, so the mapper yields null for them. */
+  @Test
+  public void copyActionsReturnNull() {
+    assertNull(S3GActionIamMapper.toS3ActionString(S3GAction.COPY_OBJECT));
+    assertNull(S3GActionIamMapper.toS3ActionString(S3GAction.CREATE_MULTIPART_KEY_BY_COPY));
+  }
+
+  /** Secret-management and STS actions are gateway-only and are not IAM-mapped S3 operations. */
+  @Test
+  public void nonIamActionsReturnNull() {
+    assertNull(S3GActionIamMapper.toS3ActionString(S3GAction.ASSUME_ROLE));
+    assertNull(S3GActionIamMapper.toS3ActionString(S3GAction.GENERATE_SECRET));
+    assertNull(S3GActionIamMapper.toS3ActionString(S3GAction.REVOKE_SECRET));
+  }
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/package-info.java
new file mode 100644
index 000000000000..4b7b37a574a1
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Unit tests for S3 gateway utility classes.
+ */
+package org.apache.hadoop.ozone.s3.util;