/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.container.keyvalue.helpers;

import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
    .ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
    .ContainerCommandResponseProto;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.MetadataStore;
import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Class which defines utility methods for KeyValueContainer.
 */
public final class KeyValueContainerUtil {

  /* Never constructed. */
  private KeyValueContainerUtil() {
  }

  private static final Logger LOG = LoggerFactory.getLogger(
      KeyValueContainerUtil.class);

  /**
   * Creates the metadata path, chunks path and metadata DB for the specified
   * container.
   *
   * @param containerMetaDataPath - Container metadata directory to create.
   * @param chunksPath - Container chunks directory to create.
   * @param dbFile - Metadata DB file to create for the container.
   * @param conf - Configuration.
   * @throws IOException if a directory or the metadata DB cannot be created.
   */
  public static void createContainerMetaData(File containerMetaDataPath, File
      chunksPath, File dbFile, Configuration conf) throws IOException {
    Preconditions.checkNotNull(containerMetaDataPath);
    Preconditions.checkNotNull(conf);

    if (!containerMetaDataPath.mkdirs()) {
      LOG.error("Unable to create directory for metadata storage. Path: {}",
          containerMetaDataPath);
      throw new IOException("Unable to create directory for metadata storage."
          + " Path: " + containerMetaDataPath);
    }
    MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf)
        .setCreateIfMissing(true).setDbFile(dbFile).build();
    // we close since the SCM pre-creates containers.
    // we will open and put Db handle into a cache when keys are being created
    // in a container.
    store.close();

    if (!chunksPath.mkdirs()) {
      LOG.error("Unable to create chunks directory Container {}",
          chunksPath);
      // clean up container metadata path and metadata db
      FileUtils.deleteDirectory(containerMetaDataPath);
      FileUtils.deleteDirectory(containerMetaDataPath.getParentFile());
      throw new IOException("Unable to create directory for data storage."
          + " Path: " + chunksPath);
    }
  }
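
  /*
   * A minimal usage sketch (not part of the original class): a caller laying
   * out a brand-new container on disk would typically derive the three paths
   * from the container's base directory and ID and invoke this method once,
   * before any blocks are written. The directory names below are illustrative
   * only; containerBaseDir and conf are assumed to be supplied by the caller.
   *
   *   File metaDir = new File(containerBaseDir, "metadata");
   *   File chunksDir = new File(containerBaseDir, "chunks");
   *   File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
   *       metaDir, containerID);
   *   KeyValueContainerUtil.createContainerMetaData(
   *       metaDir, chunksDir, dbFile, conf);
   */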

  /**
   * Remove the container if it is empty.
   * <p>
   * There are three things we need to delete:
   * <p>
   * 1. The container file and metadata file. 2. The level DB file. 3. The
   * path that we created on the data location.
   *
   * @param containerData - Data of the container to remove.
   * @param conf - configuration of the cluster.
   * @throws IOException
   */
  public static void removeContainer(KeyValueContainerData containerData,
      Configuration conf)
      throws IOException {
    Preconditions.checkNotNull(containerData);
    File containerMetaDataPath = new File(containerData
        .getMetadataPath());
    File chunksPath = new File(containerData.getChunksPath());

    // Close the DB connection and remove the DB handler from cache
    BlockUtils.removeDB(containerData, conf);

    // Delete the Container MetaData path.
    FileUtils.deleteDirectory(containerMetaDataPath);

    // Delete the Container Chunks Path.
    FileUtils.deleteDirectory(chunksPath);

    // Delete Container directory
    FileUtils.deleteDirectory(containerMetaDataPath.getParentFile());
  }

  /**
   * Returns a ReadContainer Response.
   *
   * @param request Request
   * @param containerData - data
   * @return Response.
   */
  public static ContainerCommandResponseProto getReadContainerResponse(
      ContainerCommandRequestProto request,
      KeyValueContainerData containerData) {
    Preconditions.checkNotNull(containerData);

    ContainerProtos.ReadContainerResponseProto.Builder response =
        ContainerProtos.ReadContainerResponseProto.newBuilder();
    response.setContainerData(containerData.getProtoBufMessage());

    ContainerCommandResponseProto.Builder builder =
        ContainerUtils.getSuccessResponseBuilder(request);
    builder.setReadContainer(response);
    return builder.build();
  }

  /**
   * Parse KeyValueContainerData and verify checksum.
   *
   * @param kvContainerData - Container data to populate from the container DB.
   * @param config - Configuration.
   * @throws IOException
   */
  public static void parseKVContainerData(KeyValueContainerData kvContainerData,
      Configuration config) throws IOException {

    long containerID = kvContainerData.getContainerID();
    File metadataPath = new File(kvContainerData.getMetadataPath());

    // Verify Checksum
    ContainerUtils.verifyChecksum(kvContainerData);

    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
        metadataPath, containerID);
    if (!dbFile.exists()) {
      LOG.error("Container DB file is missing for ContainerID {}. " +
          "Skipping loading of this container.", containerID);
      // Don't further process this container, as it is missing db file.
      return;
    }
    kvContainerData.setDbFile(dbFile);

    try(ReferenceCountedDB metadata =
            BlockUtils.getDB(kvContainerData, config)) {
      long bytesUsed = 0;
      List<Map.Entry<byte[], byte[]>> liveKeys = metadata.getStore()
          .getRangeKVs(null, Integer.MAX_VALUE,
              MetadataKeyFilters.getNormalKeyFilter());
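
      // Recompute bytesUsed by summing the declared size of every live block;
      // entries that cannot be decoded into BlockData are counted as zero
      // bytes instead of failing the container load.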
      bytesUsed = liveKeys.parallelStream().mapToLong(e -> {
        BlockData blockData;
        try {
          blockData = BlockUtils.getBlockData(e.getValue());
          return blockData.getSize();
        } catch (IOException ex) {
          return 0L;
        }
      }).sum();
      kvContainerData.setBytesUsed(bytesUsed);
      kvContainerData.setKeyCount(liveKeys.size());
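
      // Restore the last known block commit sequence id, if one was persisted
      // in the container DB.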
      byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
          OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
      if (bcsId != null) {
        kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
      }
    }
  }
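
  /*
   * A minimal usage sketch (hypothetical caller, not part of this class): a
   * datanode start-up path that has already deserialized a container's
   * .container file into a KeyValueContainerData instance can repopulate its
   * runtime statistics from the container DB like this:
   *
   *   KeyValueContainerData data = ...; // loaded from the .container file
   *   KeyValueContainerUtil.parseKVContainerData(data, conf);
   *   // data now carries bytesUsed, keyCount and the block commit sequence id
   */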

  /**
   * Returns the path where data or chunks live for a given container.
   *
   * @param kvContainerData - KeyValueContainerData
   * @return - Path to the chunks directory
   */
  public static Path getDataDirectory(KeyValueContainerData kvContainerData) {
    String chunksPath = kvContainerData.getChunksPath();
    Preconditions.checkNotNull(chunksPath);
    return Paths.get(chunksPath);
  }

  /**
   * Container metadata directory -- here is where the level DB and
   * .container file live.
   *
   * @param kvContainerData - KeyValueContainerData
   * @return Path to the metadata directory
   */
  public static Path getMetadataDirectory(
      KeyValueContainerData kvContainerData) {
    String metadataPath = kvContainerData.getMetadataPath();
    Preconditions.checkNotNull(metadataPath);
    return Paths.get(metadataPath);
  }
}