/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and unstable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for an *unstable* .proto interface.
*/
syntax = "proto2";
option java_package = "org.apache.hadoop.ozone.protocol.proto";
option java_outer_classname = "OzoneManagerProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.ozone;
/**
This file contains the protocol used to communicate with the
Ozone Manager. The Ozone Manager manages the namespace for Ozone,
a role similar to that of the Namenode in HDFS.
*/
import "hdds.proto";
import "Security.proto";
import "FSProtos.proto";
enum Type {
CreateVolume = 11;
SetVolumeProperty = 12;
CheckVolumeAccess = 13;
InfoVolume = 14;
DeleteVolume = 15;
ListVolume = 16;
CreateBucket = 21;
InfoBucket = 22;
SetBucketProperty = 23;
DeleteBucket = 24;
ListBuckets = 25;
CreateKey = 31;
LookupKey = 32;
RenameKey = 33;
DeleteKey = 34;
ListKeys = 35;
CommitKey = 36;
AllocateBlock = 37;
CreateS3Bucket = 41;
DeleteS3Bucket = 42;
InfoS3Bucket = 43;
ListS3Buckets = 44;
InitiateMultiPartUpload = 45;
CommitMultiPartUpload = 46;
CompleteMultiPartUpload = 47;
AbortMultiPartUpload = 48;
GetS3Secret = 49;
ListMultiPartUploadParts = 50;
ServiceList = 51;
DBUpdates = 53;
GetDelegationToken = 61;
RenewDelegationToken = 62;
CancelDelegationToken = 63;
GetFileStatus = 70;
CreateDirectory = 71;
CreateFile = 72;
LookupFile = 73;
ListStatus = 74;
AddAcl = 75;
RemoveAcl = 76;
SetAcl = 77;
GetAcl = 78;
PurgeKeys = 81;
ListMultipartUploads = 82;
}
message OMRequest {
required Type cmdType = 1; // Type of the command
// A string that identifies this command. The trace ID is generated in the
// Ozone frontend and allows the command to be traced across Ozone.
optional string traceID = 2;
required string clientId = 3;
optional UserInfo userInfo = 4;
optional CreateVolumeRequest createVolumeRequest = 11;
optional SetVolumePropertyRequest setVolumePropertyRequest = 12;
optional CheckVolumeAccessRequest checkVolumeAccessRequest = 13;
optional InfoVolumeRequest infoVolumeRequest = 14;
optional DeleteVolumeRequest deleteVolumeRequest = 15;
optional ListVolumeRequest listVolumeRequest = 16;
optional CreateBucketRequest createBucketRequest = 21;
optional InfoBucketRequest infoBucketRequest = 22;
optional SetBucketPropertyRequest setBucketPropertyRequest = 23;
optional DeleteBucketRequest deleteBucketRequest = 24;
optional ListBucketsRequest listBucketsRequest = 25;
optional CreateKeyRequest createKeyRequest = 31;
optional LookupKeyRequest lookupKeyRequest = 32;
optional RenameKeyRequest renameKeyRequest = 33;
optional DeleteKeyRequest deleteKeyRequest = 34;
optional ListKeysRequest listKeysRequest = 35;
optional CommitKeyRequest commitKeyRequest = 36;
optional AllocateBlockRequest allocateBlockRequest = 37;
optional S3CreateBucketRequest createS3BucketRequest = 41;
optional S3DeleteBucketRequest deleteS3BucketRequest = 42;
optional S3BucketInfoRequest infoS3BucketRequest = 43;
optional S3ListBucketsRequest listS3BucketsRequest = 44;
optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45;
optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46;
optional MultipartUploadCompleteRequest completeMultiPartUploadRequest = 47;
optional MultipartUploadAbortRequest abortMultiPartUploadRequest = 48;
optional GetS3SecretRequest getS3SecretRequest = 49;
optional MultipartUploadListPartsRequest listMultipartUploadPartsRequest = 50;
optional ServiceListRequest serviceListRequest = 51;
optional DBUpdatesRequest dbUpdatesRequest = 53;
optional hadoop.common.GetDelegationTokenRequestProto getDelegationTokenRequest = 61;
optional hadoop.common.RenewDelegationTokenRequestProto renewDelegationTokenRequest = 62;
optional hadoop.common.CancelDelegationTokenRequestProto cancelDelegationTokenRequest = 63;
optional UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest = 64;
optional UpdateRenewDelegationTokenRequest updatedRenewDelegationTokenRequest = 65;
optional GetFileStatusRequest getFileStatusRequest = 70;
optional CreateDirectoryRequest createDirectoryRequest = 71;
optional CreateFileRequest createFileRequest = 72;
optional LookupFileRequest lookupFileRequest = 73;
optional ListStatusRequest listStatusRequest = 74;
optional AddAclRequest addAclRequest = 75;
optional RemoveAclRequest removeAclRequest = 76;
optional SetAclRequest setAclRequest = 77;
optional GetAclRequest getAclRequest = 78;
optional PurgeKeysRequest purgeKeysRequest = 81;
optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82;
optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83;
}
message OMResponse {
required Type cmdType = 1; // Type of the command
// A string that identifies this command. The trace ID is generated in the
// Ozone frontend and allows the command to be traced across Ozone.
optional string traceID = 2;
optional bool success = 3 [default = true];
optional string message = 4;
required Status status = 5;
optional string leaderOMNodeId = 6;
optional CreateVolumeResponse createVolumeResponse = 11;
optional SetVolumePropertyResponse setVolumePropertyResponse = 12;
optional CheckVolumeAccessResponse checkVolumeAccessResponse = 13;
optional InfoVolumeResponse infoVolumeResponse = 14;
optional DeleteVolumeResponse deleteVolumeResponse = 15;
optional ListVolumeResponse listVolumeResponse = 16;
optional CreateBucketResponse createBucketResponse = 21;
optional InfoBucketResponse infoBucketResponse = 22;
optional SetBucketPropertyResponse setBucketPropertyResponse = 23;
optional DeleteBucketResponse deleteBucketResponse = 24;
optional ListBucketsResponse listBucketsResponse = 25;
optional CreateKeyResponse createKeyResponse = 31;
optional LookupKeyResponse lookupKeyResponse = 32;
optional RenameKeyResponse renameKeyResponse = 33;
optional DeleteKeyResponse deleteKeyResponse = 34;
optional ListKeysResponse listKeysResponse = 35;
optional CommitKeyResponse commitKeyResponse = 36;
optional AllocateBlockResponse allocateBlockResponse = 37;
optional S3CreateBucketResponse createS3BucketResponse = 41;
optional S3DeleteBucketResponse deleteS3BucketResponse = 42;
optional S3BucketInfoResponse infoS3BucketResponse = 43;
optional S3ListBucketsResponse listS3BucketsResponse = 44;
optional MultipartInfoInitiateResponse initiateMultiPartUploadResponse = 45;
optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46;
optional MultipartUploadCompleteResponse completeMultiPartUploadResponse = 47;
optional MultipartUploadAbortResponse abortMultiPartUploadResponse = 48;
optional GetS3SecretResponse getS3SecretResponse = 49;
optional MultipartUploadListPartsResponse listMultipartUploadPartsResponse = 50;
optional ServiceListResponse ServiceListResponse = 51;
optional DBUpdatesResponse dbUpdatesResponse = 52;
optional GetDelegationTokenResponseProto getDelegationTokenResponse = 61;
optional RenewDelegationTokenResponseProto renewDelegationTokenResponse = 62;
optional CancelDelegationTokenResponseProto cancelDelegationTokenResponse = 63;
optional GetFileStatusResponse getFileStatusResponse = 70;
optional CreateDirectoryResponse createDirectoryResponse = 71;
optional CreateFileResponse createFileResponse = 72;
optional LookupFileResponse lookupFileResponse = 73;
optional ListStatusResponse listStatusResponse = 74;
optional AddAclResponse addAclResponse = 75;
optional RemoveAclResponse removeAclResponse = 76;
optional SetAclResponse setAclResponse = 77;
optional GetAclResponse getAclResponse = 78;
optional PurgeKeysResponse purgeKeysResponse = 81;
optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82;
}
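/**
Result status returned in OMResponse.
*/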
enum Status {
OK = 1;
VOLUME_NOT_UNIQUE = 2;
VOLUME_NOT_FOUND = 3;
VOLUME_NOT_EMPTY = 4;
VOLUME_ALREADY_EXISTS = 5;
USER_NOT_FOUND = 6;
USER_TOO_MANY_VOLUMES = 7;
BUCKET_NOT_FOUND = 8;
BUCKET_NOT_EMPTY = 9;
BUCKET_ALREADY_EXISTS = 10;
KEY_ALREADY_EXISTS = 11;
KEY_NOT_FOUND = 12;
INVALID_KEY_NAME = 13;
ACCESS_DENIED = 14;
INTERNAL_ERROR = 15;
KEY_ALLOCATION_ERROR = 16;
KEY_DELETION_ERROR = 17;
KEY_RENAME_ERROR = 18;
METADATA_ERROR = 19;
OM_NOT_INITIALIZED = 20;
SCM_VERSION_MISMATCH_ERROR = 21;
S3_BUCKET_NOT_FOUND = 22;
S3_BUCKET_ALREADY_EXISTS = 23;
INITIATE_MULTIPART_UPLOAD_ERROR = 24;
MULTIPART_UPLOAD_PARTFILE_ERROR = 25;
NO_SUCH_MULTIPART_UPLOAD_ERROR = 26;
MISMATCH_MULTIPART_LIST = 27;
MISSING_UPLOAD_PARTS = 28;
COMPLETE_MULTIPART_UPLOAD_ERROR = 29;
ENTITY_TOO_SMALL = 30;
ABORT_MULTIPART_UPLOAD_FAILED = 31;
S3_SECRET_NOT_FOUND = 32;
INVALID_AUTH_METHOD = 33;
INVALID_TOKEN = 34;
TOKEN_EXPIRED = 35;
TOKEN_ERROR_OTHER = 36;
LIST_MULTIPART_UPLOAD_PARTS_FAILED = 37;
SCM_IN_SAFE_MODE = 38;
INVALID_REQUEST = 39;
BUCKET_ENCRYPTION_KEY_NOT_FOUND = 40;
UNKNOWN_CIPHER_SUITE = 41;
INVALID_KMS_PROVIDER = 42;
TOKEN_CREATION_ERROR = 43;
FILE_NOT_FOUND = 44;
DIRECTORY_NOT_FOUND = 45;
FILE_ALREADY_EXISTS = 46;
NOT_A_FILE = 47;
PERMISSION_DENIED = 48;
TIMEOUT = 49;
PREFIX_NOT_FOUND = 50;
S3_BUCKET_INVALID_LENGTH = 51; // S3 bucket name has an invalid length.
RATIS_ERROR = 52;
INVALID_PATH_IN_ACL_REQUEST = 53; // Invalid path name in the ACL request.
USER_MISMATCH = 54; // The requested user name differs from the remote user.
}
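/**
Metadata describing an Ozone volume.
*/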
message VolumeInfo {
required string adminName = 1;
required string ownerName = 2;
required string volume = 3;
optional uint64 quotaInBytes = 4;
repeated hadoop.hdds.KeyValue metadata = 5;
repeated OzoneAclInfo volumeAcls = 6;
optional uint64 creationTime = 7;
optional uint64 objectID = 8;
optional uint64 updateID = 9;
}
/**
User information extracted from the RPC context and used when
validating ACLs.
*/
message UserInfo {
optional string userName = 1;
optional string remoteAddress = 3;
}
/**
Used during OM HA: once the leader generates a token, it sends this
request via Ratis to persist the token to the OM DB. The request is used
internally by OM to replicate the token across a quorum of OMs.
*/
message UpdateGetDelegationTokenRequest {
required GetDelegationTokenResponseProto getDelegationTokenResponse = 1;
}
/**
Used during OM HA: once the leader renews a token, it sends this
request via Ratis to persist the renewal to the OM DB. The request is used
internally by OM to replicate the renewed token information across a quorum
of OMs.
*/
message UpdateRenewDelegationTokenRequest {
required hadoop.common.RenewDelegationTokenRequestProto
renewDelegationTokenRequest = 1;
required RenewDelegationTokenResponseProto renewDelegationTokenResponse = 2;
}
/**
Creates a volume
*/
message CreateVolumeRequest {
required VolumeInfo volumeInfo = 1;
}
message CreateVolumeResponse {
}
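/**
The list of volume names associated with a single user.
*/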
message UserVolumeInfo {
repeated string volumeNames = 1;
optional uint64 objectID = 2;
optional uint64 updateID = 3;
}
/**
Changes volume properties such as ownership and quota.
*/
message SetVolumePropertyRequest {
required string volumeName = 1;
optional string ownerName = 2;
optional uint64 quotaInBytes = 3;
}
message SetVolumePropertyResponse {
}
/**
* Checks whether the user has the specified permissions for the volume.
*/
message CheckVolumeAccessRequest {
required string volumeName = 1;
required OzoneAclInfo userAcl = 2;
}
message CheckVolumeAccessResponse {
}
/**
Returns information about a volume.
*/
message InfoVolumeRequest {
required string volumeName = 1;
}
message InfoVolumeResponse {
optional VolumeInfo volumeInfo = 2;
}
/**
Deletes an existing volume.
*/
message DeleteVolumeRequest {
required string volumeName = 1;
}
message DeleteVolumeResponse {
}
/**
List Volumes -- List all volumes in the cluster or by user.
*/
message ListVolumeRequest {
enum Scope {
USER_VOLUMES = 1; // User volumes -- called by user
VOLUMES_BY_USER = 2; // User volumes - called by Admin
VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
}
required Scope scope = 1;
optional string userName = 2;
optional string prefix = 3;
optional string prevKey = 4;
optional uint32 maxKeys = 5;
}
message ListVolumeResponse {
repeated VolumeInfo volumeInfo = 2;
}
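/**
Metadata describing an Ozone bucket.
*/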
message BucketInfo {
required string volumeName = 1;
required string bucketName = 2;
repeated OzoneAclInfo acls = 3;
required bool isVersionEnabled = 4 [default = false];
required StorageTypeProto storageType = 5 [default = DISK];
optional uint64 creationTime = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional BucketEncryptionInfoProto beinfo = 8;
}
enum StorageTypeProto {
DISK = 1;
SSD = 2;
ARCHIVE = 3;
RAM_DISK = 4;
}
/**
* Cipher suite.
*/
enum CipherSuiteProto {
UNKNOWN = 1;
AES_CTR_NOPADDING = 2;
}
/**
* Crypto protocol version used to access encrypted files.
*/
enum CryptoProtocolVersionProto {
UNKNOWN_PROTOCOL_VERSION = 1;
ENCRYPTION_ZONES = 2;
}
/**
* Encryption information for a bucket (bucket encryption key).
*/
message BucketEncryptionInfoProto {
required string keyName = 1;
optional CipherSuiteProto suite = 2;
optional CryptoProtocolVersionProto cryptoProtocolVersion = 3;
}
/**
* Encryption information for a file.
*/
message FileEncryptionInfoProto {
required CipherSuiteProto suite = 1;
required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
required bytes key = 3;
required bytes iv = 4;
required string keyName = 5;
required string ezKeyVersionName = 6;
}
/**
* Encryption information for an individual
* file within an encryption zone
*/
message PerFileEncryptionInfoProto {
required bytes key = 1;
required bytes iv = 2;
required string ezKeyVersionName = 3;
}
message DataEncryptionKeyProto {
required uint32 keyId = 1;
required bytes nonce = 3;
required bytes encryptionKey = 4;
required uint64 expiryDate = 5;
optional string encryptionAlgorithm = 6;
}
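/**
Bucket properties used by SetBucketProperty to update a bucket.
*/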
message BucketArgs {
required string volumeName = 1;
required string bucketName = 2;
optional bool isVersionEnabled = 5;
optional StorageTypeProto storageType = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
}
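/**
ACLs and metadata associated with a path prefix.
*/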
message PrefixInfo {
required string name = 1;
repeated OzoneAclInfo acls = 2;
repeated hadoop.hdds.KeyValue metadata = 3;
}
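/**
Identifies an Ozone object (volume, bucket, key, or prefix) referenced by
the ACL operations.
*/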
message OzoneObj {
enum ObjectType {
VOLUME = 1;
BUCKET = 2;
KEY = 3;
PREFIX = 4;
}
enum StoreType {
OZONE = 1;
S3 = 2;
}
required ObjectType resType = 1;
required StoreType storeType = 2 [default = S3];
required string path = 3;
}
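/**
A single ACL entry granting rights on an Ozone object.
*/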
message OzoneAclInfo {
enum OzoneAclType {
USER = 1;
GROUP = 2;
WORLD = 3;
ANONYMOUS = 4;
CLIENT_IP = 5;
}
enum OzoneAclScope {
ACCESS = 0;
DEFAULT = 1;
}
required OzoneAclType type = 1;
required string name = 2;
required bytes rights = 3;
required OzoneAclScope aclScope = 4 [default = ACCESS];
}
message GetAclRequest {
required OzoneObj obj = 1;
}
message GetAclResponse {
repeated OzoneAclInfo acls = 1;
}
message AddAclRequest {
required OzoneObj obj = 1;
required OzoneAclInfo acl = 2;
}
message AddAclResponse {
required bool response = 1;
}
message RemoveAclRequest {
required OzoneObj obj = 1;
required OzoneAclInfo acl = 2;
}
message RemoveAclResponse {
required bool response = 1;
}
message SetAclRequest {
required OzoneObj obj = 1;
repeated OzoneAclInfo acl = 2;
}
message SetAclResponse {
required bool response = 1;
}
message CreateBucketRequest {
required BucketInfo bucketInfo = 1;
}
message CreateBucketResponse {
}
message InfoBucketRequest {
required string volumeName = 1;
required string bucketName = 2;
}
message InfoBucketResponse {
optional BucketInfo bucketInfo = 2;
}
message SetBucketPropertyRequest {
optional BucketArgs bucketArgs = 1;
}
message SetBucketPropertyResponse {
}
message DeleteBucketRequest {
required string volumeName = 1;
required string bucketName = 2;
}
message DeleteBucketResponse {
}
message ListBucketsRequest {
required string volumeName = 1;
optional string startKey = 2;
optional string prefix = 3;
optional int32 count = 4;
}
message ListBucketsResponse {
repeated BucketInfo bucketInfo = 2;
}
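/**
Arguments identifying a key and its desired properties; shared by the key,
file, and multipart upload requests.
*/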
message KeyArgs {
required string volumeName = 1;
required string bucketName = 2;
required string keyName = 3;
optional uint64 dataSize = 4;
optional hadoop.hdds.ReplicationType type = 5;
optional hadoop.hdds.ReplicationFactor factor = 6;
repeated KeyLocation keyLocations = 7;
optional bool isMultipartKey = 8;
optional string multipartUploadID = 9;
optional uint32 multipartNumber = 10;
repeated hadoop.hdds.KeyValue metadata = 11;
repeated OzoneAclInfo acls = 12;
// Set when the request is received in preExecute. This value is used to
// set the creation/modification time, depending on the request type.
optional uint64 modificationTime = 13;
optional bool sortDatanodes = 14;
}
message KeyLocation {
required hadoop.hdds.BlockID blockID = 1;
required uint64 offset = 3;
required uint64 length = 4;
// Indicates the version at which this block was created.
optional uint64 createVersion = 5;
optional hadoop.common.TokenProto token = 6;
// Workaround to include pipeline info so clients can read/write
// without talking to SCM.
// NOTE: the pipeline info may change after the pipeline closes.
// Eventually we will have to go back to calling SCM for up-to-date
// pipeline information. That will require o3fs to provide not only an
// OM delegation token but also an SCM delegation token.
optional hadoop.hdds.Pipeline pipeline = 7;
}
message KeyLocationList {
optional uint64 version = 1;
repeated KeyLocation keyLocations = 2;
optional FileEncryptionInfoProto fileEncryptionInfo = 3;
}
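/**
Metadata for a key as stored by OM, including its block locations.
*/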
message KeyInfo {
required string volumeName = 1;
required string bucketName = 2;
required string keyName = 3;
required uint64 dataSize = 4;
required hadoop.hdds.ReplicationType type = 5;
required hadoop.hdds.ReplicationFactor factor = 6;
repeated KeyLocationList keyLocationList = 7;
required uint64 creationTime = 8;
required uint64 modificationTime = 9;
optional uint64 latestVersion = 10;
repeated hadoop.hdds.KeyValue metadata = 11;
optional FileEncryptionInfoProto fileEncryptionInfo = 12;
repeated OzoneAclInfo acls = 13;
}
message RepeatedKeyInfo {
repeated KeyInfo keyInfo = 1;
}
message OzoneFileStatusProto {
required hadoop.fs.FileStatusProto status = 1;
}
message GetFileStatusRequest {
required KeyArgs keyArgs = 1;
}
message GetFileStatusResponse {
required OzoneFileStatusProto status = 1;
}
message CreateDirectoryRequest {
required KeyArgs keyArgs = 1;
}
message CreateDirectoryResponse {
}
message CreateFileRequest {
required KeyArgs keyArgs = 1;
required bool isRecursive = 2;
required bool isOverwrite = 3;
// Set during the preExecute step in OM HA so that all OMs use the same ID.
optional uint64 clientID = 4;
}
message CreateFileResponse {
optional KeyInfo keyInfo = 1;
// The client's follow-up requests may carry this ID for stateful
// operations (similar to a cookie).
optional uint64 ID = 2;
optional uint64 openVersion = 3;
}
message LookupFileRequest {
required KeyArgs keyArgs = 1;
}
message LookupFileResponse {
optional KeyInfo keyInfo = 1;
}
message ListStatusRequest {
required KeyArgs keyArgs = 1;
required bool recursive = 2;
required string startKey = 3;
required uint64 numEntries = 4;
}
message ListStatusResponse {
repeated OzoneFileStatusProto statuses = 1;
}
message CreateKeyRequest {
required KeyArgs keyArgs = 1;
// Set during the preExecute step in OM HA so that all OMs use the same ID.
optional uint64 clientID = 2;
}
message CreateKeyResponse {
optional KeyInfo keyInfo = 2;
// The client's follow-up requests may carry this ID for stateful
// operations (similar to a cookie).
optional uint64 ID = 3;
optional uint64 openVersion = 4;
}
message LookupKeyRequest {
required KeyArgs keyArgs = 1;
}
message LookupKeyResponse {
optional KeyInfo keyInfo = 2;
// The client's follow-up requests may carry this ID for stateful
// operations (similar to a cookie).
optional uint64 ID = 3;
// TODO: allow specifying a particular version to read.
optional uint64 openVersion = 4;
}
message RenameKeyRequest {
required KeyArgs keyArgs = 1;
required string toKeyName = 2;
}
message RenameKeyResponse {
}
message DeleteKeyRequest {
required KeyArgs keyArgs = 1;
}
message DeleteKeyResponse {
optional KeyInfo keyInfo = 2;
// The client's follow-up requests may carry this ID for stateful
// operations (similar to a cookie).
optional uint64 ID = 3;
optional uint64 openVersion = 4;
}
message PurgeKeysRequest {
repeated string keys = 1;
}
message PurgeKeysResponse {
}
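/**
Payload of an Ozone delegation token or S3 token.
*/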
message OMTokenProto {
enum Type {
DELEGATION_TOKEN = 1;
S3TOKEN = 2;
};
required Type type = 1;
optional uint32 version = 2;
optional string owner = 3;
optional string renewer = 4;
optional string realUser = 5;
optional uint64 issueDate = 6;
optional uint64 maxDate = 7;
optional uint32 sequenceNumber = 8;
optional uint32 masterKeyId = 9;
optional uint64 expiryDate = 10;
optional string omCertSerialId = 11;
optional string accessKeyId = 12;
optional string signature = 13;
optional string strToSign = 14;
}
message SecretKeyProto {
required uint32 keyId = 1;
required uint64 expiryDate = 2;
required bytes privateKeyBytes = 3;
required bytes publicKeyBytes = 4;
}
message ListKeysRequest {
required string volumeName = 1;
required string bucketName = 2;
optional string startKey = 3;
optional string prefix = 4;
optional int32 count = 5;
}
message ListKeysResponse {
repeated KeyInfo keyInfo = 2;
}
message CommitKeyRequest {
required KeyArgs keyArgs = 1;
required uint64 clientID = 2;
}
message CommitKeyResponse {
}
message AllocateBlockRequest {
required KeyArgs keyArgs = 1;
required uint64 clientID = 2;
optional hadoop.hdds.ExcludeListProto excludeList = 3;
// During HA, one of the OM nodes allocates the block and sends the
// AllocateBlockRequest with keyLocation set. If this is set, there is no
// need to call SCM again in the OM Ratis applyTransaction; the location is
// simply appended to the DB.
optional KeyLocation keyLocation = 4;
}
message AllocateBlockResponse {
optional KeyLocation keyLocation = 2;
}
message ServiceListRequest {
}
message DBUpdatesRequest {
required uint64 sequenceNumber = 1;
}
message ServiceListResponse {
repeated ServiceInfo serviceInfo = 2;
// When security is enabled, the SCM CA certificate is returned to the
// Ozone client to set up gRPC TLS so the client can authenticate the
// server (DN).
optional string caCertificate = 3;
}
message DBUpdatesResponse {
required uint64 sequenceNumber = 1;
repeated bytes data = 2;
}
message ServicePort {
enum Type {
RPC = 1;
HTTP = 2;
HTTPS = 3;
RATIS = 4;
};
required Type type = 1;
required uint32 value = 2;
}
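/**
Describes one Ozone service (node type, hostname, and ports) returned by
ServiceList.
*/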
message ServiceInfo {
required hadoop.hdds.NodeType nodeType = 1;
required string hostname = 2;
repeated ServicePort servicePorts = 3;
}
message S3CreateBucketRequest {
required string userName = 1;
required string s3bucketname = 2;
// This will be set during OM HA by one of the OM nodes. If more data
// fields are needed in the future to create the volume/bucket, they can be
// added here; that is the reason for introducing a separate message type.
// S3CreateBucket means: create a volume from userName and create a bucket
// named s3bucketname.
optional S3CreateVolumeInfo s3CreateVolumeInfo = 3;
}
message S3CreateVolumeInfo {
// Creation time, set in preExecute on one of the OM nodes.
required uint64 creationTime = 1;
}
message S3CreateBucketResponse {
}
message S3DeleteBucketRequest {
required string s3bucketName = 1;
}
message S3DeleteBucketResponse {
}
message S3BucketInfoRequest {
required string s3bucketName = 1;
}
message S3BucketInfoResponse {
optional string ozoneMapping = 2;
}
message S3ListBucketsRequest {
required string userName = 1;
optional string startKey = 2;
optional string prefix = 3;
optional int32 count = 4;
}
message S3ListBucketsResponse {
repeated BucketInfo bucketInfo = 2;
}
message MultipartInfoInitiateRequest {
required KeyArgs keyArgs = 1;
}
message MultipartInfoInitiateResponse {
required string volumeName = 1;
required string bucketName = 2;
required string keyName = 3;
required string multipartUploadID = 4;
}
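/**
Tracks a multipart upload and the parts committed for it.
*/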
message MultipartKeyInfo {
required string uploadID = 4;
repeated PartKeyInfo partKeyInfoList = 5;
}
message PartKeyInfo {
required string partName = 1;
required uint32 partNumber = 2;
required KeyInfo partKeyInfo = 3;
}
message MultipartCommitUploadPartRequest {
required KeyArgs keyArgs = 1;
required uint64 clientID = 2;
}
message MultipartCommitUploadPartResponse {
// This is returned as the ETag for S3.
optional string partName = 1;
}
message MultipartUploadCompleteRequest {
required KeyArgs keyArgs = 1;
repeated Part partsList = 2;
}
message MultipartUploadCompleteResponse {
optional string volume = 1;
optional string bucket = 2;
optional string key = 3;
optional string hash = 4; // This will be used as the ETag for S3.
}
message Part {
required uint32 partNumber = 1;
required string partName = 2;
}
message MultipartUploadAbortRequest {
required KeyArgs keyArgs = 1;
}
message MultipartUploadAbortResponse {
}
message MultipartUploadListPartsRequest {
required string volume = 1;
required string bucket = 2;
required string key = 3;
required string uploadID = 4;
optional uint32 partNumbermarker = 5;
optional uint32 maxParts = 6;
}
message MultipartUploadListPartsResponse {
optional hadoop.hdds.ReplicationType type = 2;
optional hadoop.hdds.ReplicationFactor factor = 3;
optional uint32 nextPartNumberMarker = 4;
optional bool isTruncated = 5;
repeated PartInfo partsList = 6;
}
message ListMultipartUploadsRequest {
required string volume = 1;
required string bucket = 2;
required string prefix = 3;
}
message ListMultipartUploadsResponse {
optional bool isTruncated = 1;
repeated MultipartUploadInfo uploadsList = 2;
}
message MultipartUploadInfo {
required string volumeName = 1;
required string bucketName = 2;
required string keyName = 3;
required string uploadId = 4;
required uint64 creationTime = 5;
required hadoop.hdds.ReplicationType type = 6;
required hadoop.hdds.ReplicationFactor factor = 7;
}
message PartInfo {
required uint32 partNumber = 1;
required string partName = 2;
required uint64 modificationTime = 3;
required uint64 size = 4;
}
message GetDelegationTokenResponseProto {
optional hadoop.common.GetDelegationTokenResponseProto response = 2;
}
message RenewDelegationTokenResponseProto {
optional hadoop.common.RenewDelegationTokenResponseProto response = 2;
}
message CancelDelegationTokenResponseProto {
optional hadoop.common.CancelDelegationTokenResponseProto response = 2;
}
message S3Secret {
required string kerberosID = 1;
required string awsSecret = 2;
}
message GetS3SecretRequest {
required string kerberosID = 1;
}
message GetS3SecretResponse {
required S3Secret s3Secret = 2;
}
/**
Used internally by OM to replicate the S3 secret across a quorum of OMs.
*/
message UpdateGetS3SecretRequest {
required string kerberosID = 1;
required string awsSecret = 2;
}
/**
The OM service that manages the Ozone namespace.
*/
service OzoneManagerService {
// A client-to-OM RPC to send client requests to OM Ratis server
rpc submitRequest(OMRequest)
returns(OMResponse);
}
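/*
A minimal sketch of how a client might build and submit a request through the
generated Java classes (see java_package and java_outer_classname above). How
the OzoneManagerService.BlockingInterface proxy ("om") is obtained, and the
error handling, are assumptions; only the message construction follows
directly from this file.

  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;

  // Wrap a CreateVolumeRequest in the OMRequest envelope.
  OMRequest request = OMRequest.newBuilder()
      .setCmdType(Type.CreateVolume)
      .setClientId("client-1")
      .setCreateVolumeRequest(CreateVolumeRequest.newBuilder()
          .setVolumeInfo(VolumeInfo.newBuilder()
              .setAdminName("admin")
              .setOwnerName("owner")
              .setVolume("vol1")))
      .build();

  // submitRequest is the single RPC declared above; a null RpcController is
  // passed for brevity.
  OMResponse response = om.submitRequest(null, request);
  if (response.getStatus() != Status.OK) {
    throw new IOException("CreateVolume failed: " + response.getMessage());
  }
*/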