AMBARI-21781. Error: Repositories for os type redhat-ppc7 are not defined during package installation (ncole)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index b489d7f..aba8b9c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -506,16 +506,15 @@
final AmbariManagementController managementController = getManagementController();
final AmbariMetaInfo ami = managementController.getAmbariMetaInfo();
- String osFamily = getPowerPCOsFamily(hosts);
// build the list of OS repos
List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
for (OperatingSystemEntity operatingSystem : operatingSystems) {
- String osType = getOsTypeForRepo(operatingSystem, osFamily);
- if (operatingSystem.isAmbariManagedRepos()) {
- perOsRepos.put(osType, operatingSystem.getRepositories());
+
+ if (operatingSystem.isAmbariManagedRepos()) {
+ perOsRepos.put(operatingSystem.getOsType(), operatingSystem.getRepositories());
} else {
- perOsRepos.put(osType, Collections.<RepositoryEntity> emptyList());
+ perOsRepos.put(operatingSystem.getOsType(), Collections.<RepositoryEntity> emptyList());
}
}
@@ -1089,47 +1088,4 @@
amc.getAuthName(), serviceNote);
}
}
-
- /**
- * Check one host is enough to tell the arch
- * because all hosts should have the same arch.
- * @param hosts List<Host>
- * @return osFamily, null if hosts is empty or is X86_64
-
- */
- private String getPowerPCOsFamily(List<Host> hosts) {
- if (hosts.isEmpty()){
- return null;
- } else {
- Host host = hosts.get(0);
- String osFamily = host.getHostAttributes().get("os_family");
- if (null != osFamily && osFamily.endsWith("-ppc")){
- return osFamily;
- } else {
- return null;
- }
- }
- }
-
- /**
- * Use os type with -ppc post fix for powerpc
- * in order to have it consistent with the os information
- * stored in the Hosts table
- * No need to apply the change if os is x86_64
- * */
- private String getOsTypeForRepo(OperatingSystemEntity operatingSystem, String osFamily) {
- if (null != osFamily){
- String osType = operatingSystem.getOsType();
- int pos = osFamily.indexOf("-ppc");
- if (pos > 0){
- String os = osType.substring(0, pos);
- String majorVersion = osType.substring(os.length());
- return String.format("%s-ppc%s", os, majorVersion);
- } else {
- return operatingSystem.getOsType();
- }
- } else {
- return operatingSystem.getOsType();
- }
- }
}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 68596e0..5627756 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -117,6 +117,7 @@
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
+import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
@@ -1795,6 +1796,215 @@
verify(managementController, response, clusters, cluster, hostVersionDAO);
}
+ @Test
+ public void testCreateResourcesPPC() throws Exception {
+ Resource.Type type = Resource.Type.ClusterStackVersion;
+
+ AmbariManagementController managementController = createMock(AmbariManagementController.class);
+ Clusters clusters = createNiceMock(Clusters.class);
+ Cluster cluster = createNiceMock(Cluster.class);
+ Map<String, String> hostLevelParams = new HashMap<>();
+ StackId stackId = new StackId("HDP", "2.0.1");
+
+ RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
+ repoVersion.setId(1l);
+
+ String os_json = "[\n" +
+ " {\n" +
+ " \"repositories\":[\n" +
+ " {\n" +
+ " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos-ppc7/2.x/updates/2.2.0.0\",\n" +
+ " \"Repositories/repo_name\":\"HDP-UTILS\",\n" +
+ " \"Repositories/repo_id\":\"HDP-UTILS-1.1.0.20\"\n" +
+ " },\n" +
+ " {\n" +
+ " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos-ppc7/2.x/updates/2.2.0.0\",\n" +
+ " \"Repositories/repo_name\":\"HDP\",\n" +
+ " \"Repositories/repo_id\":\"HDP-2.2\"\n" +
+ " }\n" +
+ " ],\n" +
+ " \"OperatingSystems/os_type\":\"redhat-ppc7\"\n" +
+ " }\n" +
+ "]";
+
+ repoVersion.setOperatingSystems(os_json);
+
+ Map<String, Host> hostsForCluster = new HashMap<>();
+ int hostCount = 2;
+ for (int i = 0; i < hostCount; i++) {
+ String hostname = "host" + i;
+ Host host = createNiceMock(hostname, Host.class);
+ expect(host.getHostName()).andReturn(hostname).anyTimes();
+ expect(host.getOsFamily()).andReturn("redhat-ppc7").anyTimes();
+ expect(host.getMaintenanceState(EasyMock.anyLong())).andReturn(
+ MaintenanceState.OFF).anyTimes();
+ expect(host.getAllHostVersions()).andReturn(
+ Collections.<HostVersionEntity>emptyList()).anyTimes();
+ expect(host.getHostAttributes()).andReturn(
+ ImmutableMap.<String, String>builder()
+ .put("os_family", "redhat-ppc")
+ .put("os_release_version", "7.2")
+ .build()
+ ).anyTimes();
+ replay(host);
+ hostsForCluster.put(hostname, host);
+ }
+
+ final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
+ expect(schDatanode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schDatanode.getServiceComponentName()).andReturn("DATANODE").anyTimes();
+ final ServiceComponentHost schNamenode = createMock(ServiceComponentHost.class);
+ expect(schNamenode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schNamenode.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+ final ServiceComponentHost schAMS = createMock(ServiceComponentHost.class);
+ expect(schAMS.getServiceName()).andReturn("AMBARI_METRICS").anyTimes();
+ expect(schAMS.getServiceComponentName()).andReturn("METRICS_COLLECTOR").anyTimes();
+ // First host contains versionable components
+ final List<ServiceComponentHost> schsH1 = new ArrayList<ServiceComponentHost>(){{
+ add(schDatanode);
+ add(schNamenode);
+ add(schAMS);
+ }};
+ // Second host does not contain versionable components
+ final List<ServiceComponentHost> schsH2 = new ArrayList<ServiceComponentHost>(){{
+ add(schAMS);
+ }};
+
+
+ ServiceOsSpecific.Package hdfsPackage = new ServiceOsSpecific.Package();
+ hdfsPackage.setName("hdfs");
+ List<ServiceOsSpecific.Package> packages = Collections.singletonList(hdfsPackage);
+
+ ActionManager actionManager = createNiceMock(ActionManager.class);
+
+ RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+ ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
+ ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
+
+ AbstractControllerResourceProvider.init(resourceProviderFactory);
+
+ Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
+ expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+ expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+ expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+ expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+ expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+ expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+ (Map<String, String>) anyObject(List.class), anyObject(String.class))).
+ andReturn(packages).anyTimes(); // 1 host has no versionable components, other hosts have 2 services
+// that's why we don't send commands to it
+
+ expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
+ eq(managementController))).andReturn(csvResourceProvider).anyTimes();
+
+ expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+ expect(clusters.getHostsForCluster(anyObject(String.class))).andReturn(
+ hostsForCluster).anyTimes();
+
+ String clusterName = "Cluster100";
+ expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+ expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+ expect(cluster.getServices()).andReturn(new HashMap<String, Service>()).anyTimes();
+ expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+ expect(cluster.getServiceComponentHosts(anyObject(String.class))).andAnswer(new IAnswer<List<ServiceComponentHost>>() {
+ @Override
+ public List<ServiceComponentHost> answer() throws Throwable {
+ String hostname = (String) EasyMock.getCurrentArguments()[0];
+ if (hostname.equals("host2")) {
+ return schsH2;
+ } else {
+ return schsH1;
+ }
+ }
+ }).anyTimes();
+
+ ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
+ ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
+
+ expect(executionCommandWrapper.getExecutionCommand()).andReturn(executionCommand).anyTimes();
+
+ Stage stage = createNiceMock(Stage.class);
+ expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
+ andReturn(executionCommandWrapper).anyTimes();
+
+ expect(executionCommand.getHostLevelParams()).andReturn(hostLevelParams).anyTimes();
+
+ Map<Role, Float> successFactors = new HashMap<>();
+ expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
+
+ // Check that we create proper stage count
+ expect(stageFactory.createNew(anyLong(), anyObject(String.class),
+ anyObject(String.class), anyLong(),
+ anyObject(String.class), anyObject(String.class),
+ anyObject(String.class))).andReturn(stage).
+ times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
+
+ expect(
+ repositoryVersionDAOMock.findByStackAndVersion(
+ anyObject(StackId.class),
+ anyObject(String.class))).andReturn(repoVersion);
+
+ expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterId(1l);
+ clusterEntity.setClusterName(clusterName);
+ ClusterVersionEntity cve = new ClusterVersionEntity(clusterEntity,
+ repoVersion, RepositoryVersionState.INSTALL_FAILED, 0, "");
+ expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
+ anyObject(StackId.class), anyObject(String.class))).andReturn(cve);
+
+ StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+ StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+ expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>emptyList()).once();
+
+ // replay
+ replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
+ cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schAMS, actionManager,
+ executionCommand, executionCommandWrapper,stage, stageFactory, clusterVersionDAO);
+
+ ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+ type,
+ PropertyHelper.getPropertyIds(type),
+ PropertyHelper.getKeyPropertyIds(type),
+ managementController);
+
+ injector.injectMembers(provider);
+
+ // add the property map to a set for the request. add more maps for multiple creates
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
+
+ Map<String, Object> properties = new LinkedHashMap<>();
+
+ // add properties to the request map
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "2.2.0.1-885");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, "2.1.1");
+
+ propertySet.add(properties);
+
+ // create the request
+ Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+ SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator());
+
+ RequestStatus status = provider.createResources(request);
+ Assert.assertNotNull(status);
+
+ // verify
+ verify(managementController, response, clusters, stageFactory, stage);
+
+ // check that the success factor was populated in the stage
+ Float successFactor = successFactors.get(Role.INSTALL_PACKAGES);
+ Assert.assertEquals(Float.valueOf(0.85f), successFactor);
+ }
+
+
+
private void testCreateResourcesExistingUpgrade(Authentication authentication) throws Exception {
Resource.Type type = Resource.Type.ClusterStackVersion;