/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.upgrade;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.StringReader;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Stack;
import java.util.StringTokenizer;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.persistence.EntityManager;
import javax.xml.bind.JAXBException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
import org.apache.ambari.server.orm.dao.ArtifactDAO;
import org.apache.ambari.server.orm.dao.MetainfoDAO;
import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
import org.apache.ambari.server.orm.dao.PermissionDAO;
import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
import org.apache.ambari.server.orm.entities.ArtifactEntity;
import org.apache.ambari.server.orm.entities.MetainfoEntity;
import org.apache.ambari.server.orm.entities.PermissionEntity;
import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.PropertyUpgradeBehavior;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.alert.SourceType;
import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
import org.apache.ambari.server.utils.VersionUtils;
import org.apache.ambari.server.view.ViewArchiveUtility;
import org.apache.ambari.server.view.configuration.ViewConfig;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;
import com.google.common.collect.Maps;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Provider;
import com.google.inject.persist.Transactional;
public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
@Inject
protected DBAccessor dbAccessor;
@Inject
protected Configuration configuration;
@Inject
protected StackUpgradeUtil stackUpgradeUtil;
@Inject
protected ViewArchiveUtility archiveUtility;
protected Injector injector;
// map and list with constants, for filtration like in stack advisor
/**
* Override this variable in subclasses if the table name has changed
*/
protected String ambariSequencesTable = "ambari_sequences";
/**
* The user name to use as the authenticated user when performing authenticated tasks or operations
* that require the name of the authenticated user
*/
protected static final String AUTHENTICATED_USER_NAME = "ambari-upgrade";
private static final String CONFIGURATION_TYPE_HIVE_SITE = "hive-site";
private static final String CONFIGURATION_TYPE_HDFS_SITE = "hdfs-site";
public static final String CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES = "ranger-hbase-plugin-properties";
public static final String CONFIGURATION_TYPE_RANGER_KNOX_PLUGIN_PROPERTIES = "ranger-knox-plugin-properties";
public static final String CONFIGURATION_TYPE_RANGER_HIVE_PLUGIN_PROPERTIES = "ranger-hive-plugin-properties";
private static final String PROPERTY_DFS_NAMESERVICES = "dfs.nameservices";
private static final String PROPERTY_HIVE_SERVER2_AUTHENTICATION = "hive.server2.authentication";
public static final String PROPERTY_RANGER_HBASE_PLUGIN_ENABLED = "ranger-hbase-plugin-enabled";
public static final String PROPERTY_RANGER_KNOX_PLUGIN_ENABLED = "ranger-knox-plugin-enabled";
public static final String PROPERTY_RANGER_HIVE_PLUGIN_ENABLED = "ranger-hive-plugin-enabled";
public static final String YARN_SCHEDULER_CAPACITY_ROOT_QUEUE = "yarn.scheduler.capacity.root";
public static final String YARN_SCHEDULER_CAPACITY_ROOT_QUEUES = "yarn.scheduler.capacity.root.queues";
public static final String QUEUES = "queues";
public static final String ALERT_URL_PROPERTY_CONNECTION_TIMEOUT = "connection_timeout";
private static final Logger LOG = LoggerFactory.getLogger(AbstractUpgradeCatalog.class);
private static final Map<String, UpgradeCatalog> upgradeCatalogMap =
new HashMap<String, UpgradeCatalog>();
@Inject
public AbstractUpgradeCatalog(Injector injector) {
this.injector = injector;
injector.injectMembers(this);
registerCatalog(this);
}
protected AbstractUpgradeCatalog() {
}
/**
* Every subclass needs to register itself
*/
protected void registerCatalog(UpgradeCatalog upgradeCatalog) {
upgradeCatalogMap.put(upgradeCatalog.getTargetVersion(), upgradeCatalog);
}
/**
* Add a new sequence to <code>ambariSequencesTable</code>.
* @param seqName name of the sequence to be inserted
* @param seqDefaultValue initial value for the sequence
* @param ignoreFailure true to ignore SQL errors during insert
* @throws SQLException
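* <p>
* Illustrative usage (the sequence name and initial value below are hypothetical):
* <pre>{@code
* // insert a new sequence row only if it does not already exist
* addSequence("my_new_entity_id_seq", 0L, false);
* }</pre>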
*/
protected final void addSequence(String seqName, Long seqDefaultValue, boolean ignoreFailure) throws SQLException{
// check if sequence is already in the database
Statement statement = null;
ResultSet rs = null;
try {
statement = dbAccessor.getConnection().createStatement();
if (statement != null) {
rs = statement.executeQuery(String.format("SELECT COUNT(*) from %s where sequence_name='%s'", ambariSequencesTable, seqName));
if (rs != null) {
if (rs.next() && rs.getInt(1) == 0) {
dbAccessor.executeQuery(String.format("INSERT INTO %s(sequence_name, sequence_value) VALUES('%s', %d)", ambariSequencesTable, seqName, seqDefaultValue), ignoreFailure);
} else {
LOG.warn("Sequence {} already exists, skipping", seqName);
}
}
}
} finally {
if (rs != null) {
rs.close();
}
if (statement != null) {
statement.close();
}
}
}
/**
* Add several new sequences to <code>ambariSequencesTable</code>.
* @param seqNames list of sequences to be inserted
* @param seqDefaultValue initial value for the sequence
* @param ignoreFailure true to ignore insert sql errors
* @throws SQLException
*
*/
protected final void addSequences(List<String> seqNames, Long seqDefaultValue, boolean ignoreFailure) throws SQLException{
// ToDo: rewrite function to use one SQL call per select/insert for all items
for (String seqName: seqNames){
addSequence(seqName, seqDefaultValue, ignoreFailure);
}
}
@Override
public String getSourceVersion() {
return null;
}
protected static UpgradeCatalog getUpgradeCatalog(String version) {
return upgradeCatalogMap.get(version);
}
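/**
* Parses the given XML string into a DOM {@link Document}.
*
* @param xmlStr the XML content to parse
* @return the parsed document, or <code>null</code> if parsing fails (the error is logged)
*/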
protected static Document convertStringToDocument(String xmlStr) {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder;
Document doc = null;
try
{
builder = factory.newDocumentBuilder();
doc = builder.parse( new InputSource( new StringReader( xmlStr ) ) );
} catch (Exception e) {
LOG.error("Error during conversion from String \"" + xmlStr + "\" to XML!", e);
}
return doc;
}
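/**
* Checks whether the given property in the desired configuration of the given type is set to
* "yes" (case-insensitive). Used, for example, to determine whether a Ranger plugin is enabled.
*
* @param cluster the cluster to inspect; may be <code>null</code>
* @param configType the configuration type to look up
* @param propertyName the property to check
* @return <code>true</code> if the property value equals "yes" ignoring case, otherwise <code>false</code>
*/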
protected static boolean isConfigEnabled(Cluster cluster, String configType, String propertyName) {
boolean isRangerPluginEnabled = false;
if (cluster != null) {
Config rangerPluginProperties = cluster.getDesiredConfigByType(configType);
if (rangerPluginProperties != null) {
String rangerPluginEnabled = rangerPluginProperties.getProperties().get(propertyName);
if (StringUtils.isNotEmpty(rangerPluginEnabled)) {
isRangerPluginEnabled = "yes".equalsIgnoreCase(rangerPluginEnabled);
}
}
}
return isRangerPluginEnabled;
}
protected static class VersionComparator implements Comparator<UpgradeCatalog> {
@Override
public int compare(UpgradeCatalog upgradeCatalog1,
UpgradeCatalog upgradeCatalog2) {
//make sure FinalUpgradeCatalog runs last
if (upgradeCatalog1.isFinal() ^ upgradeCatalog2.isFinal()) {
return Boolean.compare(upgradeCatalog1.isFinal(), upgradeCatalog2.isFinal());
}
return VersionUtils.compareVersions(upgradeCatalog1.getTargetVersion(),
upgradeCatalog2.getTargetVersion(), 4);
}
}
/**
* Update metainfo to new version.
*/
@Transactional
public int updateMetaInfoVersion(String version) {
int rows = 0;
if (version != null) {
MetainfoDAO metainfoDAO = injector.getInstance(MetainfoDAO.class);
MetainfoEntity versionEntity = metainfoDAO.findByKey("version");
if (versionEntity != null) {
versionEntity.setMetainfoValue(version);
metainfoDAO.merge(versionEntity);
} else {
versionEntity = new MetainfoEntity();
versionEntity.setMetainfoName("version");
versionEntity.setMetainfoValue(version);
metainfoDAO.create(versionEntity);
}
}
return rows;
}
/**
* This method checks all WEB and METRIC alert definitions one by one.
* The connection_timeout parameter is added to the uri section of every alert
* definition that does not already contain it.
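* <p>
* Illustratively, the {@code uri} JSON object of a matching alert source gains a
* {@code "connection_timeout": 5.0} entry if it does not already have one.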
*/
public void addConnectionTimeoutParamForWebAndMetricAlerts() {
LOG.info("Updating alert definitions.");
AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
Clusters clusters = ambariManagementController.getClusters();
JsonParser jsonParser = new JsonParser();
for (final Cluster cluster : getCheckedClusterMap(clusters).values()) {
long clusterID = cluster.getClusterId();
List<AlertDefinitionEntity> alertDefinitionList = alertDefinitionDAO.findAll(clusterID);
for (AlertDefinitionEntity alertDefinitionEntity : alertDefinitionList) {
SourceType sourceType = alertDefinitionEntity.getSourceType();
if (sourceType == SourceType.METRIC || sourceType == SourceType.WEB) {
String source = alertDefinitionEntity.getSource();
JsonObject rootJson = jsonParser.parse(source).getAsJsonObject();
JsonObject uriJson = rootJson.get("uri").getAsJsonObject();
if (!uriJson.has(ALERT_URL_PROPERTY_CONNECTION_TIMEOUT)) {
uriJson.addProperty(ALERT_URL_PROPERTY_CONNECTION_TIMEOUT, 5.0);
alertDefinitionEntity.setSource(rootJson.toString());
alertDefinitionDAO.merge(alertDefinitionEntity);
}
}
}
}
}
protected Provider<EntityManager> getEntityManagerProvider() {
return injector.getProvider(EntityManager.class);
}
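/**
* Runs the given {@link Runnable} inside a JPA transaction. An already active transaction is
* reused; otherwise a new transaction is started, committed on success (with the second-level
* cache evicted so managed entities do not go stale after direct SQL/Criteria updates) and
* rolled back on failure.
* <p>
* A minimal usage sketch (the body of the runnable is illustrative):
* <pre>{@code
* executeInTransaction(new Runnable() {
*   public void run() {
*     // perform entity updates here
*   }
* });
* }</pre>
*
* @param func the work to execute transactionally
*/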
protected void executeInTransaction(Runnable func) {
EntityManager entityManager = getEntityManagerProvider().get();
if (entityManager.getTransaction().isActive()) { //already started, reuse
func.run();
} else {
entityManager.getTransaction().begin();
try {
func.run();
entityManager.getTransaction().commit();
// This is required because some of the entities actively managed by
// the persistence context will remain unaware of the actual changes
// occurring at the database level. Some UpgradeCatalogs perform
// update / delete using CriteriaBuilder directly.
entityManager.getEntityManagerFactory().getCache().evictAll();
} catch (Exception e) {
LOG.error("Error in transaction ", e);
if (entityManager.getTransaction().isActive()) {
entityManager.getTransaction().rollback();
}
throw new RuntimeException(e);
}
}
}
protected void changePostgresSearchPath() throws SQLException {
String dbUser = configuration.getDatabaseUser();
String schemaName = configuration.getServerJDBCPostgresSchemaName();
if (null != dbUser && !dbUser.equals("") && null != schemaName && !schemaName.equals("")) {
// Wrap username with double quotes to accept old username "ambari-server"
if (!dbUser.contains("\"")) {
dbUser = String.format("\"%s\"", dbUser);
}
dbAccessor.executeQuery(String.format("ALTER SCHEMA %s OWNER TO %s;", schemaName, dbUser));
dbAccessor.executeQuery(String.format("ALTER ROLE %s SET search_path to '%s';", dbUser, schemaName));
}
}
public void addNewConfigurationsFromXml() throws AmbariException {
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
Clusters clusters = controller.getClusters();
if (clusters == null) {
return;
}
Map<String, Cluster> clusterMap = clusters.getClusters();
if (clusterMap != null && !clusterMap.isEmpty()) {
for (Cluster cluster : clusterMap.values()) {
Map<String, Set<String>> toAddProperties = new HashMap<String, Set<String>>();
Map<String, Set<String>> toUpdateProperties = new HashMap<String, Set<String>>();
Map<String, Set<String>> toRemoveProperties = new HashMap<String, Set<String>>();
Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
for(String serviceName: cluster.getServices().keySet()) {
Set<PropertyInfo> properties = configHelper.getServiceProperties(cluster, serviceName);
if (properties == null) {
continue;
}
properties.addAll(stackProperties);
for (PropertyInfo property : properties) {
String configType = ConfigHelper.fileNameToConfigType(property.getFilename());
Config clusterConfigs = cluster.getDesiredConfigByType(configType);
PropertyUpgradeBehavior upgradeBehavior = property.getPropertyAmbariUpgradeBehavior();
if (property.isDeleted()) {
// Do nothing
} else if (upgradeBehavior.isDelete()) {
if (!toRemoveProperties.containsKey(configType)) {
toRemoveProperties.put(configType, new HashSet<String>());
}
toRemoveProperties.get(configType).add(property.getName());
} else if (upgradeBehavior.isUpdate()) {
if (!toUpdateProperties.containsKey(configType)) {
toUpdateProperties.put(configType, new HashSet<String>());
}
toUpdateProperties.get(configType).add(property.getName());
} else if (upgradeBehavior.isAdd()) {
if (!toAddProperties.containsKey(configType)) {
toAddProperties.put(configType, new HashSet<String>());
}
toAddProperties.get(configType).add(property.getName());
}
}
}
for (Entry<String, Set<String>> newProperty : toAddProperties.entrySet()) {
String newPropertyKey = newProperty.getKey();
updateConfigurationPropertiesWithValuesFromXml(newPropertyKey, newProperty.getValue(), false, true);
}
for (Entry<String, Set<String>> newProperty : toUpdateProperties.entrySet()) {
String newPropertyKey = newProperty.getKey();
updateConfigurationPropertiesWithValuesFromXml(newPropertyKey, newProperty.getValue(), true, false);
}
for (Entry<String, Set<String>> toRemove : toRemoveProperties.entrySet()) {
String newPropertyKey = toRemove.getKey();
updateConfigurationPropertiesWithValuesFromXml(newPropertyKey, Collections.<String>emptySet(), toRemove.getValue(), false, true);
}
}
}
}
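/**
* Determines whether NameNode HA is enabled for the given cluster by inspecting hdfs-site:
* <code>dfs.nameservices</code> must be set and the corresponding
* <code>dfs.ha.namenodes.&lt;nameservice&gt;</code> property must list more than one NameNode.
*
* @param cluster the cluster to check
* @return <code>true</code> if NameNode HA is enabled, otherwise <code>false</code>
*/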
protected boolean isNNHAEnabled(Cluster cluster) {
Config hdfsSiteConfig = cluster.getDesiredConfigByType(CONFIGURATION_TYPE_HDFS_SITE);
if (hdfsSiteConfig != null) {
Map<String, String> properties = hdfsSiteConfig.getProperties();
String nameServices = properties.get(PROPERTY_DFS_NAMESERVICES);
if (!StringUtils.isEmpty(nameServices)) {
String namenodes = properties.get(String.format("dfs.ha.namenodes.%s", nameServices));
if (!StringUtils.isEmpty(namenodes)) {
return (namenodes.split(",").length > 1);
}
}
}
return false;
}
/**
* Returns a map of clusters.
* The map may be empty, but it is never <code>null</code>.
*/
protected Map<String, Cluster> getCheckedClusterMap(Clusters clusters) {
if (clusters != null) {
Map<String, Cluster> clusterMap = clusters.getClusters();
if (clusterMap != null) {
return clusterMap;
}
}
return new HashMap<>();
}
/**
* Create a new cluster scoped configuration with the new properties added,
* using the values from the corresponding stack xml definitions.
*
* If the service that owns a property is not part of the cluster, that property is not added.
*
* @param configType Configuration type. (hdfs-site, etc.)
* @param propertyNames set of property names to add or update
* @param updateIfExists whether to overwrite the values of properties that already exist
* @param createNewConfigType whether to create the config type if it does not yet exist in the cluster
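* <p>
* Illustrative usage (config type and property name are examples only):
* <pre>{@code
* // add a single property with its default value from the stack definition, creating the
* // config type if it does not exist yet
* updateConfigurationPropertiesWithValuesFromXml("core-site",
*     Collections.singleton("some.new.property"), false, true);
* }</pre>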
*/
protected void updateConfigurationPropertiesWithValuesFromXml(String configType,
Set<String> propertyNames, boolean updateIfExists, boolean createNewConfigType) throws AmbariException {
updateConfigurationPropertiesWithValuesFromXml(configType, propertyNames, null, updateIfExists, createNewConfigType);
}
protected void updateConfigurationPropertiesWithValuesFromXml(String configType,
Set<String> propertyNames,
Set<String> toRemove,
boolean updateIfExists,
boolean createNewConfigType) throws AmbariException {
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
Clusters clusters = controller.getClusters();
if (clusters == null) {
return;
}
Map<String, Cluster> clusterMap = clusters.getClusters();
if (clusterMap != null && !clusterMap.isEmpty()) {
for (Cluster cluster : clusterMap.values()) {
Map<String, String> properties = new HashMap<String, String>();
for(String propertyName:propertyNames) {
String propertyValue = configHelper.getPropertyValueFromStackDefinitions(cluster, configType, propertyName);
if(propertyValue == null) {
LOG.info("Config " + propertyName + " from " + configType + " is not found in xml definitions. " +
"Skipping configuration property update");
continue;
}
ServiceInfo propertyService = configHelper.getPropertyOwnerService(cluster, configType, propertyName);
if(propertyService != null && !cluster.getServices().containsKey(propertyService.getName())) {
LOG.info("Config " + propertyName + " from " + configType + " with value = " + propertyValue +
" is not added because service " + propertyService.getName() + " is not part of the cluster.");
continue;
}
properties.put(propertyName, propertyValue);
}
updateConfigurationPropertiesForCluster(cluster, configType,
properties, toRemove, updateIfExists, createNewConfigType);
}
}
}
/**
* Update properties for the cluster
* @param cluster cluster object
* @param configType config to be updated
* @param properties properties to be added or updated; cannot be <code>null</code>, but may be empty
* @param removePropertiesList properties to be removed; may be <code>null</code>
* @param updateIfExists whether to overwrite the values of properties that already exist
* @param createNewConfigType whether to create the config type if it does not yet exist in the cluster
* @throws AmbariException
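* <p>
* Illustrative usage (property names and values are examples only):
* <pre>{@code
* // overwrite one property and drop another in the existing core-site config
* updateConfigurationPropertiesForCluster(cluster, "core-site",
*     Collections.singletonMap("some.property", "some-value"),
*     Collections.singleton("some.obsolete.property"), true, false);
* }</pre>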
*/
protected void updateConfigurationPropertiesForCluster(Cluster cluster, String configType,
Map<String, String> properties, Set<String> removePropertiesList, boolean updateIfExists,
boolean createNewConfigType) throws AmbariException {
AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
String newTag = "version" + System.currentTimeMillis();
if (properties != null) {
Map<String, Config> all = cluster.getConfigsByType(configType);
if (all == null || !all.containsKey(newTag) || properties.size() > 0) {
Map<String, String> oldConfigProperties;
Config oldConfig = cluster.getDesiredConfigByType(configType);
if (oldConfig == null && !createNewConfigType) {
LOG.info("Config " + configType + " not found. Assuming service not installed. " +
"Skipping configuration properties update");
return;
} else if (oldConfig == null) {
oldConfigProperties = new HashMap<String, String>();
newTag = "version1";
} else {
oldConfigProperties = oldConfig.getProperties();
}
Map<String, String> mergedProperties =
mergeProperties(oldConfigProperties, properties, updateIfExists);
if (removePropertiesList != null) {
mergedProperties = removeProperties(mergedProperties, removePropertiesList);
}
if (!Maps.difference(oldConfigProperties, mergedProperties).areEqual()) {
LOG.info("Applying configuration with tag '{}' to " +
"cluster '{}'", newTag, cluster.getClusterName());
Map<String, Map<String, String>> propertiesAttributes = null;
if (oldConfig != null) {
propertiesAttributes = oldConfig.getPropertiesAttributes();
}
// the contract of creating a configuration requires non-null
// collections for attributes
if (null == propertiesAttributes) {
propertiesAttributes = Collections.emptyMap();
}
controller.createConfig(cluster, configType, mergedProperties, newTag, propertiesAttributes);
Config baseConfig = cluster.getConfig(configType, newTag);
if (baseConfig != null) {
String authName = AUTHENTICATED_USER_NAME;
if (cluster.addDesiredConfig(authName, Collections.singleton(baseConfig)) != null) {
String oldConfigString = (oldConfig != null) ? " from='" + oldConfig.getTag() + "'" : "";
LOG.info("cluster '" + cluster.getClusterName() + "' "
+ "changed by: '" + authName + "'; "
+ "type='" + baseConfig.getType() + "' "
+ "tag='" + baseConfig.getTag() + "'"
+ oldConfigString);
}
}
} else {
LOG.info("No changes detected to config " + configType + ". Skipping configuration properties update");
}
}
}
}
protected void updateConfigurationPropertiesForCluster(Cluster cluster, String configType,
Map<String, String> properties, boolean updateIfExists, boolean createNewConfigType) throws AmbariException {
updateConfigurationPropertiesForCluster(cluster, configType, properties, null, updateIfExists, createNewConfigType);
}
/**
* Remove properties from the cluster
* @param cluster cluster object
* @param configType config to be updated
* @param removePropertiesList properties to be removed; may be <code>null</code>
* @throws AmbariException
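* <p>
* Illustrative usage (the property name is an example only):
* <pre>{@code
* removeConfigurationPropertiesFromCluster(cluster, "hdfs-site",
*     Collections.singleton("some.obsolete.property"));
* }</pre>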
*/
protected void removeConfigurationPropertiesFromCluster(Cluster cluster, String configType, Set<String> removePropertiesList)
throws AmbariException {
updateConfigurationPropertiesForCluster(cluster, configType, new HashMap<String, String>(), removePropertiesList, false, true);
}
/**
* Create a new cluster scoped configuration with the new properties added
* to the existing set of properties.
* @param configType Configuration type. (hdfs-site, etc.)
* @param properties Map of key value pairs to add / update.
*/
protected void updateConfigurationProperties(String configType,
Map<String, String> properties, boolean updateIfExists, boolean createNewConfigType) throws
AmbariException {
AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
Clusters clusters = controller.getClusters();
if (clusters == null) {
return;
}
Map<String, Cluster> clusterMap = clusters.getClusters();
if (clusterMap != null && !clusterMap.isEmpty()) {
for (Cluster cluster : clusterMap.values()) {
updateConfigurationPropertiesForCluster(cluster, configType,
properties, updateIfExists, createNewConfigType);
}
}
}
private Map<String, String> mergeProperties(Map<String, String> originalProperties,
Map<String, String> newProperties,
boolean updateIfExists) {
Map<String, String> properties = new HashMap<String, String>(originalProperties);
for (Map.Entry<String, String> entry : newProperties.entrySet()) {
if (!properties.containsKey(entry.getKey()) || updateIfExists) {
properties.put(entry.getKey(), entry.getValue());
}
}
return properties;
}
private Map<String, String> removeProperties(Map<String, String> originalProperties, Set<String> removeList){
Map<String, String> properties = new HashMap<String, String>();
properties.putAll(originalProperties);
for (String removeProperty: removeList){
if (originalProperties.containsKey(removeProperty)){
properties.remove(removeProperty);
}
}
return properties;
}
/**
* Iterates through a collection of AbstractKerberosDescriptorContainers to find and update
* identity descriptor references.
*
* @param descriptorMap a String to AbstractKerberosDescriptorContainer map to iterate through
* @param referenceName the reference name to change
* @param newReferenceName the new reference name
*/
protected void updateKerberosDescriptorIdentityReferences(Map<String, ? extends AbstractKerberosDescriptorContainer> descriptorMap,
String referenceName,
String newReferenceName) {
if (descriptorMap != null) {
for (AbstractKerberosDescriptorContainer kerberosServiceDescriptor : descriptorMap.values()) {
updateKerberosDescriptorIdentityReferences(kerberosServiceDescriptor, referenceName, newReferenceName);
if (kerberosServiceDescriptor instanceof KerberosServiceDescriptor) {
updateKerberosDescriptorIdentityReferences(((KerberosServiceDescriptor) kerberosServiceDescriptor).getComponents(),
referenceName, newReferenceName);
}
}
}
}
/**
* Given an AbstractKerberosDescriptorContainer, iterates through its contained identity descriptors
* to find ones matching the reference name to change.
* <p/>
* If found, the reference name is updated to the new name.
*
* @param descriptorContainer the AbstractKerberosDescriptorContainer to update
* @param referenceName the reference name to change
* @param newReferenceName the new reference name
*/
protected void updateKerberosDescriptorIdentityReferences(AbstractKerberosDescriptorContainer descriptorContainer,
String referenceName,
String newReferenceName) {
if (descriptorContainer != null) {
KerberosIdentityDescriptor identity = descriptorContainer.getIdentity(referenceName);
if (identity != null) {
identity.setName(newReferenceName);
}
}
}
/**
* Update the stored Kerberos Descriptor artifacts to conform to the new structure.
* <p/>
* Finds the relevant artifact entities and iterates through them to process each independently.
*/
protected void updateKerberosDescriptorArtifacts() throws AmbariException {
ArtifactDAO artifactDAO = injector.getInstance(ArtifactDAO.class);
List<ArtifactEntity> artifactEntities = artifactDAO.findByName("kerberos_descriptor");
if (artifactEntities != null) {
for (ArtifactEntity artifactEntity : artifactEntities) {
updateKerberosDescriptorArtifact(artifactDAO, artifactEntity);
}
}
}
/**
* Retrieve the composite Kerberos Descriptor.
* <p>
* The composite Kerberos Descriptor is the cluster's stack-specific Kerberos Descriptor overlaid
* with changes specified by the user via the cluster's Kerberos Descriptor artifact.
*
* @param cluster the relevant cluster
* @return the composite Kerberos Descriptor
* @throws AmbariException
*/
protected KerberosDescriptor getKerberosDescriptor(Cluster cluster) throws AmbariException {
// Get the Stack-defined Kerberos Descriptor (aka default Kerberos Descriptor)
AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
StackId stackId = cluster.getCurrentStackVersion();
KerberosDescriptor defaultDescriptor = ambariMetaInfo.getKerberosDescriptor(stackId.getStackName(), stackId.getStackVersion());
// Get the User-set Kerberos Descriptor
ArtifactDAO artifactDAO = injector.getInstance(ArtifactDAO.class);
KerberosDescriptor artifactDescriptor = null;
ArtifactEntity artifactEntity = artifactDAO.findByNameAndForeignKeys("kerberos_descriptor",
new TreeMap<String, String>(Collections.singletonMap("cluster", String.valueOf(cluster.getClusterId()))));
if (artifactEntity != null) {
Map<String, Object> data = artifactEntity.getArtifactData();
if (data != null) {
artifactDescriptor = new KerberosDescriptorFactory().createInstance(data);
}
}
// Calculate and return the composite Kerberos Descriptor
if (defaultDescriptor == null) {
return artifactDescriptor;
} else if (artifactDescriptor == null) {
return defaultDescriptor;
} else {
defaultDescriptor.update(artifactDescriptor);
return defaultDescriptor;
}
}
/**
* Add a new role authorization and optionally add it to 1 or more roles.
* <p>
* The collection of roles to add the new role authorization to may be null or empty, indicating
* that no roles are to be altered. If set, though, each role entry in the collection must be a
* colon-delimited string like: <code>ROLE:RESOURCE TYPE</code>. Examples:
* <ul>
* <li>"AMBARI.ADMINISTRATOR:AMBARI"</li>
* <li>"CLUSTER.ADMINISTRATOR:CLUSTER"</li>
* <li>"SERVICE.OPERATOR:CLUSTER"</li>
* </ul>
*
* @param roleAuthorizationID the ID of the new authorization
* @param roleAuthorizationName the (descriptive) name of the new authorization
* @param applicableRoles an optional collection of role specifications to add the new authorization to
* @throws SQLException
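* <p>
* Illustrative usage (the authorization ID and name below are hypothetical):
* <pre>{@code
* addRoleAuthorization("CLUSTER.SOME_NEW_OPERATION", "Perform some new operation",
*     Arrays.asList("AMBARI.ADMINISTRATOR:AMBARI", "CLUSTER.ADMINISTRATOR:CLUSTER"));
* }</pre>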
*/
protected void addRoleAuthorization(String roleAuthorizationID, String roleAuthorizationName, Collection<String> applicableRoles) throws SQLException {
if (!StringUtils.isEmpty(roleAuthorizationID)) {
RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
RoleAuthorizationEntity roleAuthorization = roleAuthorizationDAO.findById(roleAuthorizationID);
if (roleAuthorization == null) {
roleAuthorization = new RoleAuthorizationEntity();
roleAuthorization.setAuthorizationId(roleAuthorizationID);
roleAuthorization.setAuthorizationName(roleAuthorizationName);
roleAuthorizationDAO.create(roleAuthorization);
}
if ((applicableRoles != null) && (!applicableRoles.isEmpty())) {
for (String role : applicableRoles) {
String[] parts = role.split("\\:");
addAuthorizationToRole(parts[0], parts[1], roleAuthorization);
}
}
}
}
/**
* Add a new authorization to the set of authorizations for a role
*
* @param roleName the name of the role
* @param resourceType the resource type of the role (AMBARI, CLUSTER, VIEW, etc...)
* @param roleAuthorizationID the ID of the authorization
* @see #addAuthorizationToRole(String, String, RoleAuthorizationEntity)
*/
protected void addAuthorizationToRole(String roleName, String resourceType, String roleAuthorizationID) {
if (!StringUtils.isEmpty(roleAuthorizationID)) {
RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
RoleAuthorizationEntity roleAuthorization = roleAuthorizationDAO.findById(roleAuthorizationID);
if (roleAuthorization != null) {
addAuthorizationToRole(roleName, resourceType, roleAuthorization);
}
}
}
/**
* Add a new authorization to the set of authorizations for a role
*
* @param roleName the name of the role
* @param resourceType the resource type of the role (AMBARI, CLUSTER, VIEW, etc...)
* @param roleAuthorization the authorization to add
*/
protected void addAuthorizationToRole(String roleName, String resourceType, RoleAuthorizationEntity roleAuthorization) {
if ((roleAuthorization != null) && !StringUtils.isEmpty(roleName) && !StringUtils.isEmpty(resourceType)) {
PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
PermissionEntity role = permissionDAO.findPermissionByNameAndType(roleName, resourceTypeDAO.findByName(resourceType));
if (role != null) {
role.getAuthorizations().add(roleAuthorization);
permissionDAO.merge(role);
}
}
}
/**
* Add a new authorization to the set of authorizations for a role
*
* @param role the role to add the authorization to
* @param roleAuthorizationID the authorization to add
*/
protected void addAuthorizationToRole(PermissionEntity role, String roleAuthorizationID) {
if ((role != null) && !StringUtils.isEmpty(roleAuthorizationID)) {
RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
RoleAuthorizationEntity roleAuthorization = roleAuthorizationDAO.findById(roleAuthorizationID);
if (roleAuthorization != null) {
PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
role.getAuthorizations().add(roleAuthorization);
permissionDAO.merge(role);
}
}
}
/**
* Update the specified Kerberos Descriptor artifact to conform to the new structure.
* <p/>
* On an Ambari version update, some identities may be moved between scopes (e.g. from service to component), so
* the old identity needs to be moved to its proper place and all references to the moved identity need to be updated.
* <p/>
* By default the descriptor remains unchanged; this method must be overridden in a child UpgradeCatalog to apply
* the Kerberos descriptor changes required by the new Ambari version.
* <p/>
* The supplied ArtifactEntity is updated in place and merged back into the database.
*
* @param artifactDAO the ArtifactDAO to use to store the updated ArtifactEntity
* @param artifactEntity the ArtifactEntity to update
*/
protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
// NOOP
}
@Override
public void upgradeSchema() throws AmbariException, SQLException {
DatabaseType databaseType = configuration.getDatabaseType();
if (databaseType == DatabaseType.POSTGRES) {
changePostgresSearchPath();
}
executeDDLUpdates();
}
@Override
public void preUpgradeData() throws AmbariException, SQLException {
executePreDMLUpdates();
}
@Override
public void upgradeData() throws AmbariException, SQLException {
executeDMLUpdates();
updateTezHistoryUrlBase();
}
/**
* The version of the Tez view changes with every new Ambari version, so the 'tez.tez-ui.history-url.base' property
* in tez-site.xml has to be updated on every Ambari upgrade. This method reads the latest tez-view jar file,
* determines the view version from the view.xml inside it, and updates the 'tez.tez-ui.history-url.base'
* property in tez-site.xml with the new tez view version.
*/
private void updateTezHistoryUrlBase() throws AmbariException {
AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
Clusters clusters = ambariManagementController.getClusters();
if (clusters != null) {
Map<String, Cluster> clusterMap = clusters.getClusters();
if (clusterMap != null && !clusterMap.isEmpty()) {
for (final Cluster cluster : clusterMap.values()) {
Set<String> installedServices = cluster.getServices().keySet();
if (installedServices.contains("TEZ")) {
Config tezSite = cluster.getDesiredConfigByType("tez-site");
if (tezSite != null) {
String currentTezHistoryUrlBase = tezSite.getProperties().get("tez.tez-ui.history-url.base");
if (!StringUtils.isEmpty(currentTezHistoryUrlBase)) {
String newTezHistoryUrlBase = getUpdatedTezHistoryUrlBase(currentTezHistoryUrlBase);
updateConfigurationProperties("tez-site", Collections.singletonMap("tez.tez-ui.history-url.base", newTezHistoryUrlBase), true, false);
}
}
}
}
}
}
}
/**
* Transforms the existing tez history url base to the new url considering the latest tez view version.
* @param currentTezHistoryUrlBase Existing value of the tez history url base
* @return the updated tez history url base
* @throws AmbariException if currentTezHistoryUrlBase is malformed or does not match the expected Tez view URL pattern
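* <p>
* Illustratively, a value of the form {@code <prefix>/TEZ/<old-version>/TEZ_CLUSTER_INSTANCE}
* has its {@code <old-version>} segment replaced with the version read from the installed
* tez-view archive (see {@link #getLatestTezViewVersion(String)}).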
*/
protected String getUpdatedTezHistoryUrlBase(String currentTezHistoryUrlBase) throws AmbariException{
String pattern = "(.*\\/TEZ\\/)(.*)(\\/TEZ_CLUSTER_INSTANCE)";
Pattern regex = Pattern.compile(pattern);
Matcher matcher = regex.matcher(currentTezHistoryUrlBase);
String prefix;
String suffix;
String oldVersion;
if (matcher.find()) {
prefix = matcher.group(1);
oldVersion = matcher.group(2);
suffix = matcher.group(3);
} else {
throw new AmbariException("Cannot prepare the new value for property: 'tez.tez-ui.history-url.base' using the old value: '" + currentTezHistoryUrlBase + "'");
}
String latestTezViewVersion = getLatestTezViewVersion(oldVersion);
return prefix + latestTezViewVersion + suffix;
}
/**
* Given the old configured version, this method tries to determine the new version of the tez view by reading the tez-view jar.
* Assumption: only a single tez-view jar is present in the views directory.
* @param oldVersion the version to return if the new version cannot be determined
* @return the new version of the tez view, or oldVersion if an error is encountered while determining the new version
*/
protected String getLatestTezViewVersion(String oldVersion) {
File viewsDirectory = configuration.getViewsDir();
File[] files = viewsDirectory.listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.startsWith("tez-view");
}
});
if(files == null || files.length == 0) {
LOG.error("Could not find tez-view jar file in '{}'. Returning the old version", viewsDirectory.getAbsolutePath());
return oldVersion;
}
File tezViewFile = files[0];
try {
ViewConfig viewConfigFromArchive = archiveUtility.getViewConfigFromArchive(tezViewFile);
return viewConfigFromArchive.getVersion();
} catch (JAXBException | IOException e) {
LOG.error("Failed to read the tez view version from: {}. Returning the old version", tezViewFile);
return oldVersion;
}
}
@Override
public final void updateDatabaseSchemaVersion() {
updateMetaInfoVersion(getTargetVersion());
}
@Override
public boolean isFinal() {
return false;
}
protected abstract void executeDDLUpdates() throws AmbariException, SQLException;
/**
* Perform data insertion before the normal data upgrade; requires a started persistence service
* @throws AmbariException
* @throws SQLException
*/
protected abstract void executePreDMLUpdates() throws AmbariException, SQLException;
protected abstract void executeDMLUpdates() throws AmbariException, SQLException;
@Override
public String toString() {
return "{ upgradeCatalog: sourceVersion = " + getSourceVersion() + ", " +
"targetVersion = " + getTargetVersion() + " }";
}
/**
* {@inheritDoc}
*/
@Override
public void onPostUpgrade() throws AmbariException, SQLException {
// NOOP
}
/**
* Validate that queueNameProperty exists in configType of the cluster and corresponds to one of validLeafQueues
* @param cluster cluster to operate with
* @param validLeafQueues Set of YARN capacity-scheduler leaf queues
* @param queueNameProperty queue name property to check
* @param configType config type name
* @return true if the queue name property exists and references one of the valid leaf queues
*/
protected boolean isQueueNameValid(Cluster cluster, Set<String> validLeafQueues, String queueNameProperty, String configType) {
Config site = cluster.getDesiredConfigByType(configType);
Map<String, String> properties = site.getProperties();
boolean result = properties.containsKey(queueNameProperty) && validLeafQueues.contains(properties.get(queueNameProperty));
if (!result){
LOG.info("Queue name property " + queueNameProperty + " in " + configType + " is not defined or does not correspond to a valid capacity-scheduler leaf queue");
}
return result;
}
/**
* Update the queueNameProperty in configType of the cluster to the first of validLeafQueues
* @param cluster cluster to operate with
* @param validLeafQueues Set of YARN capacity-scheduler leaf queues
* @param queueNameProperty queue name property to check and update
* @param configType config type name
* @throws AmbariException if an error occurs while updating the configurations
*/
protected void updateQueueName(Cluster cluster, Set<String> validLeafQueues, String queueNameProperty, String configType) throws AmbariException {
String recommendQueue = validLeafQueues.iterator().next();
LOG.info("Updating " + queueNameProperty + " in " + configType + " to " + recommendQueue);
Map<String, String> updates = Collections.singletonMap(queueNameProperty, recommendQueue);
updateConfigurationPropertiesForCluster(cluster, configType, updates, true, true);
}
/**
* Parse capacity-scheduler properties and get all YARN Capacity Scheduler leaf queue names
* @param capacitySchedulerMap capacity-scheduler properties map
* @return all YARN Capacity Scheduler leaf queue names
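* <p>
* For example (illustrative queue layout), given
* {@code yarn.scheduler.capacity.root.queues=default,ops} and
* {@code yarn.scheduler.capacity.root.ops.queues=etl,adhoc}, this method returns the leaf
* queue names {@code default}, {@code etl} and {@code adhoc}.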
*/
protected Set<String> getCapacitySchedulerLeafQueues(Map<String, String> capacitySchedulerMap) {
Set<String> leafQueues= new HashSet<>();
Stack<String> toProcessQueues = new Stack<>();
if (capacitySchedulerMap.containsKey(YARN_SCHEDULER_CAPACITY_ROOT_QUEUES)){
StringTokenizer queueTokenizer = new StringTokenizer(capacitySchedulerMap.get(
YARN_SCHEDULER_CAPACITY_ROOT_QUEUES), ",");
while (queueTokenizer.hasMoreTokens()){
toProcessQueues.push(queueTokenizer.nextToken());
}
}
while (!toProcessQueues.empty()){
String queue = toProcessQueues.pop();
String queueKey = YARN_SCHEDULER_CAPACITY_ROOT_QUEUE + "." + queue + "." + QUEUES;
if (capacitySchedulerMap.containsKey(queueKey)){
StringTokenizer queueTokenizer = new StringTokenizer(capacitySchedulerMap.get(queueKey), ",");
while (queueTokenizer.hasMoreTokens()){
toProcessQueues.push(queue + "." + queueTokenizer.nextToken());
}
} else {
if (!queue.endsWith(".")){
String queueName = queue.substring(queue.lastIndexOf('.')+1);
leafQueues.add(queueName);
} else {
LOG.warn("Queue " + queue + " is not valid");
}
}
}
return leafQueues;
}
}