STORM-3434: server: fix all checkstyle warnings (#3050)
diff --git a/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj b/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
index a9e843c..2808770 100644
--- a/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
+++ b/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
@@ -104,14 +104,14 @@
ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 3)
ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 4)
ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 5)]
- (is (= 1 (._nodes ns-count-1)))
- (is (= 4 (._slots ns-count-1)))
- (is (= 1 (._nodes ns-count-3)))
- (is (= 4 (._slots ns-count-3)))
- (is (= 1 (._nodes ns-count-4)))
- (is (= 4 (._slots ns-count-4)))
- (is (= 2 (._nodes ns-count-5)))
- (is (= 8 (._slots ns-count-5)))
+ (is (= 1 (.nodes ns-count-1)))
+ (is (= 4 (.slots ns-count-1)))
+ (is (= 1 (.nodes ns-count-3)))
+ (is (= 4 (.slots ns-count-3)))
+ (is (= 1 (.nodes ns-count-4)))
+ (is (= 4 (.slots ns-count-4)))
+ (is (= 2 (.nodes ns-count-5)))
+ (is (= 8 (.slots ns-count-5)))
)
(let [nodes (.takeNodesBySlots free-pool 5)]
(is (= 2 (.size nodes)))
@@ -347,14 +347,14 @@
ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 3)
ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 4)
ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 5)]
- (is (= 1 (._nodes ns-count-1)))
- (is (= 4 (._slots ns-count-1)))
- (is (= 1 (._nodes ns-count-3)))
- (is (= 4 (._slots ns-count-3)))
- (is (= 1 (._nodes ns-count-4)))
- (is (= 4 (._slots ns-count-4)))
- (is (= 2 (._nodes ns-count-5)))
- (is (= 8 (._slots ns-count-5)))
+ (is (= 1 (.nodes ns-count-1)))
+ (is (= 4 (.slots ns-count-1)))
+ (is (= 1 (.nodes ns-count-3)))
+ (is (= 4 (.slots ns-count-3)))
+ (is (= 1 (.nodes ns-count-4)))
+ (is (= 4 (.slots ns-count-4)))
+ (is (= 2 (.nodes ns-count-5)))
+ (is (= 8 (.slots ns-count-5)))
)
(let [nodes (.takeNodesBySlots default-pool 3)]
(is (= 1 (.size nodes)))
@@ -532,14 +532,14 @@
ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 3)
ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 4)
ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 5)]
- (is (= 1 (._nodes ns-count-1)))
- (is (= 4 (._slots ns-count-1)))
- (is (= 1 (._nodes ns-count-3)))
- (is (= 4 (._slots ns-count-3)))
- (is (= 1 (._nodes ns-count-4)))
- (is (= 4 (._slots ns-count-4)))
- (is (= 1 (._nodes ns-count-5))) ;;Only 1 node can be stolen right now
- (is (= 4 (._slots ns-count-5)))
+ (is (= 1 (.nodes ns-count-1)))
+ (is (= 4 (.slots ns-count-1)))
+ (is (= 1 (.nodes ns-count-3)))
+ (is (= 4 (.slots ns-count-3)))
+ (is (= 1 (.nodes ns-count-4)))
+ (is (= 4 (.slots ns-count-4)))
+ (is (= 1 (.nodes ns-count-5))) ;;Only 1 node can be stolen right now
+ (is (= 4 (.slots ns-count-5)))
)
(let [nodes (.takeNodesBySlots isolated-pool 3)]
(is (= 1 (.size nodes)))
diff --git a/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java b/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
index a7102c4..2328c75 100644
--- a/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
+++ b/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
@@ -25,7 +25,7 @@
@Override
- public void prepare(Map<String, Object> StormConf) {
+ public void prepare(Map<String, Object> stormConf) {
//no-op
}
diff --git a/storm-server/pom.xml b/storm-server/pom.xml
index a65057d..bafbd0d 100644
--- a/storm-server/pom.xml
+++ b/storm-server/pom.xml
@@ -186,7 +186,7 @@
<artifactId>maven-checkstyle-plugin</artifactId>
<!--Note - the version would be inherited-->
<configuration>
- <maxAllowedViolations>763</maxAllowedViolations>
+ <maxAllowedViolations>0</maxAllowedViolations>
</configuration>
</plugin>
<plugin>
diff --git a/storm-server/src/main/java/org/apache/storm/DaemonConfig.java b/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
index 820d80f..def49ae 100644
--- a/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
+++ b/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
@@ -18,6 +18,21 @@
package org.apache.storm;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsImplementationOfClass;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsListEntryCustom;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryCustom;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsNoDuplicateInList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsNumber;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsString;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringOrStringList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.NotNull;
+import static org.apache.storm.validation.ConfigValidationAnnotations.Password;
+
import java.util.ArrayList;
import java.util.Map;
import org.apache.storm.container.ResourceIsolationInterface;
@@ -31,27 +46,12 @@
import org.apache.storm.validation.ConfigValidation;
import org.apache.storm.validation.Validated;
-import static org.apache.storm.validation.ConfigValidationAnnotations.NotNull;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsImplementationOfClass;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsListEntryCustom;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryCustom;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsNoDuplicateInList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsNumber;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsString;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringOrStringList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.Password;
-
/**
* Storm configs are specified as a plain old map. This class provides constants for all the configurations possible on a Storm cluster.
* Each constant is paired with an annotation that defines the validity criterion of the corresponding field. Default values for these
* configs can be found in defaults.yaml.
*
- * This class extends {@link org.apache.storm.Config} for supporting Storm Daemons.
+ * <p>This class extends {@link org.apache.storm.Config} for supporting Storm Daemons.
*/
public class DaemonConfig implements Validated {
@@ -90,7 +90,7 @@
/**
* A global task scheduler used to assign topologies's tasks to supervisors' workers.
*
- * If this is not set, a default system scheduler will be used.
+ * <p>If this is not set, a default system scheduler will be used.
*/
@IsString
public static final String STORM_SCHEDULER = "storm.scheduler";
@@ -146,7 +146,8 @@
* Otherwise, the scheduler will assume a supervisor is bad only when it does not receive supervisor heartbeat in time.
*/
@IsBoolean
- public static final String BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT = "blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot";
+ public static final String BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT
+ = "blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot";
/**
* Whether we want to display all the resource capacity and scheduled usage on the UI page. You MUST have this variable set if you are
@@ -211,9 +212,9 @@
/**
* The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
*
- * Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS. Note that the time it takes to delete an
- * inbox jar file is going to be somewhat more than NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often
- * NIMBUS_CLEANUP_FREQ_SECS is set to).
+ * <p>Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS. Note that the time
+ * it takes to delete an inbox jar file is going to be somewhat more than NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS
+ * (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
*
* @see #NIMBUS_CLEANUP_INBOX_FREQ_SECS
*/
diff --git a/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java b/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
index 4927da4..5e94a09 100644
--- a/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
+++ b/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
@@ -28,8 +28,9 @@
* Topology.
*/
public interface ILocalClusterTrackedTopologyAware extends ILocalCluster {
+
/**
- * Submit a tracked topology to be run in local mode
+ * Submit a tracked topology to be run in local mode.
*
* @param topologyName the name of the topology to use
* @param conf the config for the topology
@@ -41,7 +42,7 @@
ILocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology) throws TException;
/**
- * Submit a tracked topology to be run in local mode
+ * Submit a tracked topology to be run in local mode.
*
* @param topologyName the name of the topology to use
* @param conf the config for the topology
diff --git a/storm-server/src/main/java/org/apache/storm/LocalCluster.java b/storm-server/src/main/java/org/apache/storm/LocalCluster.java
index eb7a746..0af21ad 100644
--- a/storm-server/src/main/java/org/apache/storm/LocalCluster.java
+++ b/storm-server/src/main/java/org/apache/storm/LocalCluster.java
@@ -334,6 +334,7 @@
*
* @throws Exception on any Exception.
*/
+ @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
public static <T> T withLocalModeOverride(Callable<T> c, long ttlSec, Map<String, Object> daemonConf) throws Exception {
LOG.info("\n\n\t\tSTARTING LOCAL MODE CLUSTER\n\n");
Builder builder = new Builder();
@@ -422,6 +423,7 @@
}
/**
+ * Reference to nimbus.
* @return Nimbus itself so you can interact with it directly, if needed.
*/
public Nimbus getNimbus() {
@@ -429,6 +431,7 @@
}
/**
+ * Reference to metrics registry.
* @return The metrics registry for the local cluster.
*/
public StormMetricsRegistry getMetricRegistry() {
@@ -436,7 +439,8 @@
}
/**
- * @return the base config for the daemons.
+ * Get daemon configuration.
+ * @return the base config for the daemons
*/
public Map<String, Object> getDaemonConf() {
return new HashMap<>(daemonConf);
@@ -463,6 +467,24 @@
}
@Override
+ public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology)
+ throws TException {
+ return submitTopology(topologyName, conf, topology.getTopology());
+ }
+
+ @Override
+ public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology)
+ throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
+ try {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
+ submitTopology(name, conf, topology);
+ } catch (ParseException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, StormTopology topology,
SubmitOptions submitOpts)
throws TException {
@@ -474,12 +496,6 @@
}
@Override
- public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology)
- throws TException {
- return submitTopology(topologyName, conf, topology.getTopology());
- }
-
- @Override
public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, TrackedTopology topology,
SubmitOptions submitOpts)
throws TException {
@@ -487,6 +503,19 @@
}
@Override
+ public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology,
+ SubmitOptions options)
+ throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
+ try {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
+ submitTopologyWithOpts(name, conf, topology, options);
+ } catch (ParseException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
public void uploadNewCredentials(String topologyName, Credentials creds) throws TException {
getNimbus().uploadNewCredentials(topologyName, creds);
}
@@ -792,34 +821,9 @@
return trackId;
}
- @Override
- public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology)
- throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
- try {
- @SuppressWarnings("unchecked")
- Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
- submitTopology(name, conf, topology);
- } catch (ParseException e) {
- throw new RuntimeException(e);
- }
- }
-
//Nimbus Compatibility
@Override
- public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology,
- SubmitOptions options)
- throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
- try {
- @SuppressWarnings("unchecked")
- Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
- submitTopologyWithOpts(name, conf, topology, options);
- } catch (ParseException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
public void setLogConfig(String name, LogConfig config) throws TException {
// TODO Auto-generated method stub
throw new RuntimeException("NOT IMPLEMENTED YET");
diff --git a/storm-server/src/main/java/org/apache/storm/LocalDRPC.java b/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
index ba243cc..b11b2db 100644
--- a/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
+++ b/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
@@ -30,10 +30,11 @@
import org.apache.storm.utils.ServiceRegistry;
/**
- * A Local way to test DRPC
+ * A Local way to test DRPC.
*
- * try (LocalDRPC drpc = new LocalDRPC()) { // Do tests }
+ * <p>try <code>(LocalDRPC drpc = new LocalDRPC()) { // Do tests }</code>
*/
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public class LocalDRPC implements ILocalDRPC {
private final DRPC drpc;
diff --git a/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java b/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
index f2e81cf..e332d6e 100644
--- a/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
+++ b/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
@@ -31,28 +31,21 @@
private static Object lock = new Object();
/**
- * Register a process' handle
- *
- * @param pid
- * @param shutdownable
+ * Register a process' handle.
*/
public static void registerProcess(String pid, Shutdownable shutdownable) {
processMap.put(pid, shutdownable);
}
/**
- * Get all process handles
- *
- * @return
+ * Get all process handles.
*/
public static Collection<Shutdownable> getAllProcessHandles() {
return processMap.values();
}
/**
- * Kill a process
- *
- * @param pid
+ * Kill a process.
*/
public static void killProcess(String pid) {
synchronized (lock) {
@@ -67,7 +60,7 @@
}
/**
- * Kill all processes
+ * Kill all processes.
*/
public static void killAllProcesses() {
Set<String> pids = processMap.keySet();
diff --git a/storm-server/src/main/java/org/apache/storm/Testing.java b/storm-server/src/main/java/org/apache/storm/Testing.java
index 8f46a33..637d01d 100644
--- a/storm-server/src/main/java/org/apache/storm/Testing.java
+++ b/storm-server/src/main/java/org/apache/storm/Testing.java
@@ -79,7 +79,7 @@
/**
* Continue to execute body repeatedly until condition is true or TEST_TIMEOUT_MS has
- * passed
+ * passed.
* @param condition what we are waiting for
* @param body what to run in the loop
* @throws AssertionError if the loop timed out.
@@ -90,7 +90,7 @@
/**
* Continue to execute body repeatedly until condition is true or TEST_TIMEOUT_MS has
- * passed
+ * passed.
* @param timeoutMs the number of ms to wait before timing out.
* @param condition what we are waiting for
* @param body what to run in the loop
@@ -113,19 +113,20 @@
}
/**
- * Convenience method for data.stream.allMatch(pred)
+ * Convenience method for data.stream.allMatch(pred).
*/
public static <T> boolean isEvery(Collection<T> data, Predicate<T> pred) {
return data.stream().allMatch(pred);
}
/**
- * Run with simulated time
+ * Run with simulated time.
+ *
* @deprecated use ```
- * try (Time.SimulatedTime time = new Time.SimulatedTime()) {
- * ...
- * }
- * ```
+ * try (Time.SimulatedTime time = new Time.SimulatedTime()) {
+ * ...
+ * }
+ * ```
* @param code what to run
*/
@Deprecated
@@ -157,22 +158,23 @@
conf = new HashMap<>();
}
return new LocalCluster.Builder()
- .withSupervisors(supervisors)
- .withPortsPerSupervisor(ports)
- .withDaemonConf(conf)
- .withNimbusDaemon(param.isNimbusDaemon())
- .withTracked(id)
- .withSimulatedTime(simulated)
- .build();
+ .withSupervisors(supervisors)
+ .withPortsPerSupervisor(ports)
+ .withDaemonConf(conf)
+ .withNimbusDaemon(param.isNimbusDaemon())
+ .withTracked(id)
+ .withSimulatedTime(simulated)
+ .build();
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster()) {
+ * ...
+ * }
+ * ```
* @param code what to run
*/
@Deprecated
@@ -181,12 +183,13 @@
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
+ * ...
+ * }
+ * ```
* @param param configs to set in the cluster
* @param code what to run
*/
@@ -200,12 +203,13 @@
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
+ * ...
+ * }
+ * ```
* @param clusterConf some configs to set in the cluster
*/
@Deprecated
@@ -235,12 +239,13 @@
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) {
+ * ...
+ * }
+ * ```
* @param code what to run
*/
@Deprecated
@@ -249,12 +254,13 @@
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime()....build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime()....build()) {
+ * ...
+ * }
+ * ```
* @param param configs to set in the cluster
* @param code what to run
*/
@@ -268,12 +274,13 @@
}
/**
- * Run with a local cluster
+ * Run with a local cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder().withTracked().build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder().withTracked().build()) {
+ * ...
+ * }
+ * ```
* @param code what to run
*/
@Deprecated
@@ -282,26 +289,13 @@
}
/**
- * In a tracked topology some metrics are tracked. This provides a way to get those metrics.
- * This is intended mostly for internal testing.
- * @param id the id of the tracked cluster
- * @param key the name of the metric to get.
- * @return the metric
- */
- @SuppressWarnings("unchecked")
- @Deprecated
- public static int globalAmt(String id, String key) {
- LOG.warn("Reading tracked metrics for ID {}", id);
- return ((ConcurrentHashMap<String, AtomicInteger>) RegisteredGlobalState.getState(id)).get(key).get();
- }
-
- /**
- * Run with a local tracked cluster
+ * Run with a local tracked cluster.
+ *
* @deprecated use ```
- * try (LocalCluster cluster = new LocalCluster.Builder().withTracked()....build()) {
- * ...
- * }
- * ```
+ * try (LocalCluster cluster = new LocalCluster.Builder().withTracked()....build()) {
+ * ...
+ * }
+ * ```
* @param param configs to set in the cluster
* @param code what to run
*/
@@ -315,6 +309,21 @@
}
/**
+ * In a tracked topology some metrics are tracked. This provides a way to get those metrics.
+ * This is intended mostly for internal testing.
+ *
+ * @param id the id of the tracked cluster
+ * @param key the name of the metric to get.
+ * @return the metric
+ */
+ @SuppressWarnings("unchecked")
+ @Deprecated
+ public static int globalAmt(String id, String key) {
+ LOG.warn("Reading tracked metrics for ID {}", id);
+ return ((ConcurrentHashMap<String, AtomicInteger>) RegisteredGlobalState.getState(id)).get(key).get();
+ }
+
+ /**
* Track and capture a topology.
* This is intended mostly for internal testing.
*/
@@ -324,10 +333,10 @@
}
/**
- * Rewrites a topology so that all the tuples flowing through it are captured
+ * Rewrites a topology so that all the tuples flowing through it are captured.
* @param topology the topology to rewrite
* @return the modified topology and a new Bolt that can retrieve the
- * captured tuples.
+ * captured tuples.
*/
public static CapturedTopology<StormTopology> captureTopology(StormTopology topology) {
topology = topology.deepCopy(); //Don't modify the original
@@ -366,12 +375,11 @@
/**
* Run a topology to completion capturing all of the messages that are emitted. This only works when all of the spouts are
- * instances of {@link org.apache.storm.testing.CompletableSpout}
+ * instances of {@link org.apache.storm.testing.CompletableSpout}.
* @param cluster the cluster to submit the topology to
* @param topology the topology itself
- * @return a map of the component to the list of tuples it emitted.
- * @throws InterruptedException
- * @throws TException on any error from nimbus.
+ * @return a map of the component to the list of tuples it emitted
+ * @throws TException on any error from nimbus
*/
public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology) throws InterruptedException,
TException {
@@ -383,15 +391,13 @@
* instances of {@link org.apache.storm.testing.CompletableSpout} or are overwritten by MockedSources in param
* @param cluster the cluster to submit the topology to
* @param topology the topology itself
- * @param param parameters to describe how to complete a topology.
- * @return a map of the component to the list of tuples it emitted.
- * @throws InterruptedException
+ * @param param parameters to describe how to complete a topology
+ * @return a map of the component to the list of tuples it emitted
* @throws TException on any error from nimbus.
*/
public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology,
CompleteTopologyParam param) throws TException, InterruptedException {
Map<String, List<FixedTuple>> ret = null;
- IStormClusterState state = cluster.getClusterState();
CapturedTopology<StormTopology> capTopo = captureTopology(topology);
topology = capTopo.topology;
String topoName = param.getTopologyName();
@@ -407,8 +413,10 @@
spouts.get(mocked.getKey()).set_spout_object(Thrift.serializeComponentObject(newSpout));
}
}
- List<Object> spoutObjects = spouts.values().stream().
- map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object())).collect(Collectors.toList());
+ List<Object> spoutObjects = spouts.values()
+ .stream()
+ .map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object()))
+ .collect(Collectors.toList());
for (Object o : spoutObjects) {
if (!(o instanceof CompletableSpout)) {
@@ -427,6 +435,7 @@
cluster.advanceClusterTime(11);
}
+ IStormClusterState state = cluster.getClusterState();
String topoId = state.getTopoId(topoName).get();
//Give the topology time to come up without using it to wait for the spouts to complete
simulateWait(cluster);
@@ -435,28 +444,28 @@
timeoutMs = TEST_TIMEOUT_MS;
}
whileTimeout(timeoutMs,
- () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()),
- () -> {
- try {
- simulateWait(cluster);
- } catch (Exception e) {
- throw new RuntimeException();
- }
- });
+ () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()),
+ () -> {
+ try {
+ simulateWait(cluster);
+ } catch (Exception e) {
+ throw new RuntimeException();
+ }
+ });
KillOptions killOpts = new KillOptions();
killOpts.set_wait_secs(0);
cluster.killTopologyWithOpts(topoName, killOpts);
whileTimeout(timeoutMs,
- () -> state.assignmentInfo(topoId, null) != null,
- () -> {
- try {
- simulateWait(cluster);
- } catch (Exception e) {
- throw new RuntimeException();
- }
- });
+ () -> state.assignmentInfo(topoId, null) != null,
+ () -> {
+ try {
+ simulateWait(cluster);
+ } catch (Exception e) {
+ throw new RuntimeException();
+ }
+ });
if (param.getCleanupState()) {
for (Object o : spoutObjects) {
@@ -471,7 +480,7 @@
}
/**
- * If using simulated time simulate waiting for 10 seconds. This is intended for internal testing only.
+ * If using simulated time simulate waiting for 10 seconds. This is intended for internal testing only.
*/
public static void simulateWait(ILocalCluster cluster) throws InterruptedException {
if (Time.isSimulating()) {
@@ -481,7 +490,7 @@
}
/**
- * Get all of the tuples from a given component on the default stream
+ * Get all of the tuples from a given component on the default stream.
* @param results the results of running a completed topology
* @param componentId the id of the component to look at
* @return a list of the tuple values.
@@ -491,7 +500,7 @@
}
/**
- * Get all of the tuples from a given component on a given stream
+ * Get all of the tuples from a given component on a given stream.
* @param results the results of running a completed topology
* @param componentId the id of the component to look at
* @param streamId the id of the stream to look for.
@@ -520,63 +529,63 @@
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(CapturedTopology<TrackedTopology> topo) {
topo.topology.trackedWait();
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(CapturedTopology<TrackedTopology> topo, Integer amt) {
topo.topology.trackedWait(amt);
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(CapturedTopology<TrackedTopology> topo, Integer amt, Integer timeoutMs) {
topo.topology.trackedWait(amt, timeoutMs);
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(TrackedTopology topo) {
topo.trackedWait();
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(TrackedTopology topo, Integer amt) {
topo.trackedWait(amt);
}
/**
- * Simulated time wait for a tracked topology. This is intended for internal testing
+ * Simulated time wait for a tracked topology. This is intended for internal testing.
*/
public static void trackedWait(TrackedTopology topo, Integer amt, Integer timeoutMs) {
topo.trackedWait(amt, timeoutMs);
}
/**
- * Simulated time wait for a cluster. This is intended for internal testing
+ * Simulated time wait for a cluster. This is intended for internal testing.
*/
public static void advanceClusterTime(ILocalCluster cluster, Integer secs) throws InterruptedException {
advanceClusterTime(cluster, secs, 1);
}
/**
- * Simulated time wait for a cluster. This is intended for internal testing
+ * Simulated time wait for a cluster. This is intended for internal testing.
*/
public static void advanceClusterTime(ILocalCluster cluster, Integer secs, Integer step) throws InterruptedException {
cluster.advanceClusterTime(secs, step);
}
/**
- * Count how many times each element appears in the Collection
+ * Count how many times each element appears in the Collection.
* @param c a collection of values
* @return a map of the unique values in c to the count of those values.
*/
@@ -627,7 +636,7 @@
}
/**
- * Create a {@link org.apache.storm.tuple.Tuple} for use with testing
+ * Create a {@link org.apache.storm.tuple.Tuple} for use with testing.
* @param values the values to appear in the tuple
*/
public static Tuple testTuple(List<Object> values) {
@@ -635,7 +644,7 @@
}
/**
- * Create a {@link org.apache.storm.tuple.Tuple} for use with testing
+ * Create a {@link org.apache.storm.tuple.Tuple} for use with testing.
* @param values the values to appear in the tuple
* @param param parametrs describing more details about the tuple
*/
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java b/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
index 00d833f..a0644fa 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
@@ -49,6 +49,7 @@
return BLOBSTORE_SUBTREE;
}
+ @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public static CuratorFramework createZKClient(Map<String, Object> conf, DaemonType type) {
@SuppressWarnings("unchecked")
List<String> zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
@@ -191,7 +192,7 @@
out = null;
}
isSuccess = true;
- } catch(FileNotFoundException fnf) {
+ } catch (FileNotFoundException fnf) {
LOG.warn("Blobstore file for key '{}' does not exist or got deleted before it could be downloaded.", key, fnf);
} catch (IOException | AuthorizationException exception) {
throw new RuntimeException(exception);
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java b/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
index 4552981..3c87a61 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
@@ -38,12 +38,13 @@
* Very basic blob store impl with no ACL handling.
*/
public class FileBlobStoreImpl {
- private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000l;
+ private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000L;
private static final int BUCKETS = 1024;
private static final Logger LOG = LoggerFactory.getLogger(FileBlobStoreImpl.class);
private static final Timer timer = new Timer("FileBlobStore cleanup thread", true);
private File fullPath;
private TimerTask cleanup = null;
+
public FileBlobStoreImpl(File path, Map<String, Object> conf) throws IOException {
LOG.info("Creating new blob store based in {}", path);
fullPath = path;
@@ -66,13 +67,22 @@
}
/**
- * @return all keys that are available for reading.
- * @throws IOException on any error.
+ * List keys.
+ * @return all keys that are available for reading
+ * @throws IOException on any error
*/
public Iterator<String> listKeys() throws IOException {
return new KeyInHashDirIterator();
}
+ protected Iterator<String> listKeys(File path) throws IOException {
+ String[] files = path.list();
+ if (files != null) {
+ return Arrays.asList(files).iterator();
+ }
+ return new LinkedList<String>().iterator();
+ }
+
/**
* Get an input stream for reading a part.
* @param key the key of the part to read.
@@ -103,7 +113,7 @@
}
/**
- * Delete a key from the blob store
+ * Delete a key from the blob store.
* @param key the key to delete
* @throws IOException on any error
*/
@@ -164,14 +174,6 @@
return ret.iterator();
}
- protected Iterator<String> listKeys(File path) throws IOException {
- String[] files = path.list();
- if (files != null) {
- return Arrays.asList(files).iterator();
- }
- return new LinkedList<String>().iterator();
- }
-
protected void delete(File path) throws IOException {
if (Files.exists(path.toPath())) {
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java b/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
index 611d33c..39e1747 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
@@ -17,11 +17,11 @@
import java.util.TreeSet;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.nimbus.NimbusInfo;
-import org.apache.storm.utils.WrappedKeyNotFoundException;
import org.apache.storm.shade.org.apache.curator.framework.CuratorFramework;
import org.apache.storm.shade.org.apache.zookeeper.CreateMode;
import org.apache.storm.shade.org.apache.zookeeper.KeeperException;
import org.apache.storm.shade.org.apache.zookeeper.ZooDefs;
+import org.apache.storm.utils.WrappedKeyNotFoundException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -39,16 +39,16 @@
* nimbus and the non-leader nimbus syncs after a call back is triggered by attempting
* to download the blob and finally updates its state inside the zookeeper.
*
- * A watch is placed on the /storm/blobstore/key1 and the znodes leader:8080-1 and
+ * <p>A watch is placed on the /storm/blobstore/key1 and the znodes leader:8080-1 and
* non-leader:8080-1 are ephemeral which implies that these nodes exist only until the
* connection between the corresponding nimbus and the zookeeper persist. If in case the
* nimbus crashes the node disappears under /storm/blobstore/key1.
*
- * The sequence number for the keys are handed over based on the following scenario:
+ * <p>The sequence number for the keys are handed over based on the following scenario:
* Lets assume there are three nimbodes up and running, one being the leader and the other
* being the non-leader.
*
- * 1. Create is straight forward.
+ * <p>1. Create is straight forward.
* Check whether the znode -> /storm/blobstore/key1 has been created or not. It implies
* the blob has not been created yet. If not created, it creates it and updates the zookeeper
* states under /storm/blobstore/key1 and /storm/blobstoremaxkeysequencenumber/key1.
@@ -58,65 +58,65 @@
* indicating the true value of number of updates for a blob. This node helps to maintain sanity in case
* leadership changes due to crashing.
*
- * 2. Delete does not require to hand over the sequence number.
+ * <p>2. Delete does not require to hand over the sequence number.
*
- * 3. Finally, the update has few scenarios.
+ * <p>3. Finally, the update has few scenarios.
*
- * The class implements a TreeSet. The basic idea is if all the nimbodes have the same
- * sequence number for the blob, then the number of elements in the set is 1 which holds
- * the latest value of sequence number. If the number of elements are greater than 1 then it
- * implies that there is sequence mismatch and there is need for syncing the blobs across
- * nimbodes.
+ * <p>The class implements a TreeSet. The basic idea is if all the nimbodes have the same
+ * sequence number for the blob, then the number of elements in the set is 1 which holds
+ * the latest value of sequence number. If the number of elements are greater than 1 then it
+ * implies that there is sequence mismatch and there is need for syncing the blobs across
+ * nimbodes.
*
- * The logic for handing over sequence numbers based on the state are described as follows
- * Here consider Nimbus-1 alias as N1 and Nimbus-2 alias as N2.
- * Scenario 1:
- * Example: Normal create/update scenario
- * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
- * Create-Key1 alive - Leader alive 1 1
- * Sync alive - Leader alive 1 1 (callback -> download) 1
- * Update-Key1 alive - Leader alive 2 1 2
- * Sync alive - Leader alive 2 2 (callback -> download) 2
+ * <p>The logic for handing over sequence numbers based on the state are described as follows
+ * Here consider Nimbus-1 alias as N1 and Nimbus-2 alias as N2.
+ * Scenario 1:
+ * Example: Normal create/update scenario
+ * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
+ * Create-Key1 alive - Leader alive 1 1
+ * Sync alive - Leader alive 1 1 (callback -> download) 1
+ * Update-Key1 alive - Leader alive 2 1 2
+ * Sync alive - Leader alive 2 2 (callback -> download) 2
*
- * Scenario 2:
- * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
- * Create alive - Leader alive 1 1
- * Sync alive - Leader alive 1 1 (callback -> download) 1
- * Update alive - Leader alive 2 1 2
- * Sync alive - Leader alive 2 2 (callback -> download) 2
- * Update alive - Leader alive 3 2 3
- * Crash crash - Leader alive 3 2 3
- * New - Leader crash alive - Leader 3 (Invalid) 2 3
- * Update crash alive - Leader 3 (Invalid) 4 (max-seq-num + 1) 4
- * N1-Restored alive alive - Leader 0 4 4
- * Sync alive alive - Leader 4 4 4
+ * <p>Scenario 2:
+ * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
+ * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
+ * Create alive - Leader alive 1 1
+ * Sync alive - Leader alive 1 1 (callback -> download) 1
+ * Update alive - Leader alive 2 1 2
+ * Sync alive - Leader alive 2 2 (callback -> download) 2
+ * Update alive - Leader alive 3 2 3
+ * Crash crash - Leader alive 3 2 3
+ * New - Leader crash alive - Leader 3 (Invalid) 2 3
+ * Update crash alive - Leader 3 (Invalid) 4 (max-seq-num + 1) 4
+ * N1-Restored alive alive - Leader 0 4 4
+ * Sync alive alive - Leader 4 4 4
*
- * Scenario 3:
- * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
- * Create alive - Leader alive 1 1
- * Sync alive - Leader alive 1 1 (callback -> download) 1
- * Update alive - Leader alive 2 1 2
- * Sync alive - Leader alive 2 2 (callback -> download) 2
- * Update alive - Leader alive 3 2 3
- * Crash crash - Leader alive 3 2 3
- * Elect Leader crash alive - Leader 3 (Invalid) 2 3
- * N1-Restored alive alive - Leader 3 2 3
- * Read/Update alive alive - Leader 3 4 (Downloads from N1) 4
- * Sync alive alive - Leader 4 (callback) 4 4
- * Here the download is triggered whenever an operation corresponding to the blob is triggered on the
- * nimbus like a read or update operation. Here, in the read/update call it is hard to know which call
- * is read or update. Hence, by incrementing the sequence number to max-seq-num + 1 we ensure that the
- * synchronization happens appropriately and all nimbodes have the same blob.
+ * <p>Scenario 3:
+ * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
+ * Operation Nimbus-1:state Nimbus-2:state Seq-Num-Nimbus-1 Seq-Num-Nimbus-2 Max-Seq-Num
+ * Create alive - Leader alive 1 1
+ * Sync alive - Leader alive 1 1 (callback -> download) 1
+ * Update alive - Leader alive 2 1 2
+ * Sync alive - Leader alive 2 2 (callback -> download) 2
+ * Update alive - Leader alive 3 2 3
+ * Crash crash - Leader alive 3 2 3
+ * Elect Leader crash alive - Leader 3 (Invalid) 2 3
+ * N1-Restored alive alive - Leader 3 2 3
+ * Read/Update alive alive - Leader 3 4 (Downloads from N1) 4
+ * Sync alive alive - Leader 4 (callback) 4 4
+ * Here the download is triggered whenever an operation corresponding to the blob is triggered on the
+ * nimbus like a read or update operation. Here, in the read/update call it is hard to know which call
+ * is read or update. Hence, by incrementing the sequence number to max-seq-num + 1 we ensure that the
+ * synchronization happens appropriately and all nimbodes have the same blob.
*/
public class KeySequenceNumber {
private static final Logger LOG = LoggerFactory.getLogger(KeySequenceNumber.class);
- private final String BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE = "/blobstoremaxkeysequencenumber";
+ private static final String BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE = "/blobstoremaxkeysequencenumber";
private final String key;
private final NimbusInfo nimbusInfo;
- private final int INT_CAPACITY = 4;
- private final int INITIAL_SEQUENCE_NUMBER = 1;
+ private static final int INT_CAPACITY = 4;
+ private static final int INITIAL_SEQUENCE_NUMBER = 1;
public KeySequenceNumber(String key, NimbusInfo nimbusInfo) {
this.key = key;
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
index b6bfd47..a8f519d 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
@@ -14,6 +14,12 @@
package org.apache.storm.blobstore;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.ADMIN;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.WRITE;
+import static org.apache.storm.daemon.nimbus.Nimbus.NIMBUS_SUBJECT;
+import static org.apache.storm.daemon.nimbus.Nimbus.getVersionForKey;
+
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
@@ -49,25 +55,22 @@
import org.apache.storm.utils.Utils;
import org.apache.storm.utils.WrappedKeyAlreadyExistsException;
import org.apache.storm.utils.WrappedKeyNotFoundException;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.storm.blobstore.BlobStoreAclHandler.*;
-import static org.apache.storm.daemon.nimbus.Nimbus.NIMBUS_SUBJECT;
-import static org.apache.storm.daemon.nimbus.Nimbus.getVersionForKey;
-
/**
* Provides a local file system backed blob store implementation for Nimbus.
*
- * For a local blob store the user and the supervisor use NimbusBlobStore Client API in order to talk to nimbus through thrift.
+ * <p>For a local blob store the user and the supervisor use NimbusBlobStore Client API in order to talk to nimbus through thrift.
* The authentication and authorization here is based on the subject.
* We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS configuration. NIMBUS_ADMINS are given READ, WRITE and ADMIN
* access whereas the SUPERVISOR_ADMINS are given READ access in order to read and download the blobs form the nimbus.
*
- * The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
+ * <p>The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
* who has read, write or admin privileges in order to perform respective operations on the blob.
*
- * For local blob store
+ * <p>For local blob store
* 1. The USER interacts with nimbus to upload and access blobs through NimbusBlobStore Client API.
* 2. The USER sets the ACLs, and the blob access is validated against these ACLs.
* 3. The SUPERVISOR interacts with nimbus through the NimbusBlobStore Client API to download the blobs.
@@ -78,9 +81,9 @@
public static final Logger LOG = LoggerFactory.getLogger(LocalFsBlobStore.class);
private static final String DATA_PREFIX = "data_";
private static final String META_PREFIX = "meta_";
- private final String BLOBSTORE_SUBTREE = "/blobstore/";
+ private static final String BLOBSTORE_SUBTREE = "/blobstore/";
private final int allPermissions = READ | WRITE | ADMIN;
- protected BlobStoreAclHandler _aclHandler;
+ protected BlobStoreAclHandler aclHandler;
private NimbusInfo nimbusInfo;
private FileBlobStoreImpl fbs;
private Map<String, Object> conf;
@@ -103,7 +106,7 @@
} catch (IOException e) {
throw new RuntimeException(e);
}
- _aclHandler = new BlobStoreAclHandler(conf);
+ aclHandler = new BlobStoreAclHandler(conf);
try {
this.stormClusterState = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
} catch (Exception e) {
@@ -115,8 +118,6 @@
/**
* Sets up blobstore state for all current keys.
- * @throws KeyNotFoundException
- * @throws AuthorizationException
*/
private void setupBlobstore() throws AuthorizationException, KeyNotFoundException {
IStormClusterState state = stormClusterState;
@@ -198,7 +199,7 @@
throw new RuntimeException(e);
}
}
- }, 0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS))*1000);
+ }, 0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS)) * 1000);
}
@@ -207,18 +208,18 @@
KeyAlreadyExistsException {
LOG.debug("Creating Blob for key {}", key);
validateKey(key);
- _aclHandler.normalizeSettableBlobMeta(key, meta, who, allPermissions);
+ aclHandler.normalizeSettableBlobMeta(key, meta, who, allPermissions);
BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
- _aclHandler.hasPermissions(meta.get_acl(), allPermissions, who, key);
+ aclHandler.hasPermissions(meta.get_acl(), allPermissions, who, key);
if (fbs.exists(DATA_PREFIX + key)) {
throw new WrappedKeyAlreadyExistsException(key);
}
- BlobStoreFileOutputStream mOut = null;
+ BlobStoreFileOutputStream outputStream = null;
try {
- mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, true));
- mOut.write(Utils.thriftSerialize(meta));
- mOut.close();
- mOut = null;
+ outputStream = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, true));
+ outputStream.write(Utils.thriftSerialize(meta));
+ outputStream.close();
+ outputStream = null;
this.stormClusterState.setupBlob(key, this.nimbusInfo, getVersionForKey(key, this.nimbusInfo, zkClient));
return new BlobStoreFileOutputStream(fbs.write(DATA_PREFIX + key, true));
} catch (IOException e) {
@@ -226,9 +227,9 @@
} catch (KeyNotFoundException e) {
throw new RuntimeException(e);
} finally {
- if (mOut != null) {
+ if (outputStream != null) {
try {
- mOut.cancel();
+ outputStream.cancel();
} catch (IOException e) {
//Ignored
}
@@ -285,7 +286,7 @@
checkForBlobUpdate(key);
}
SettableBlobMeta meta = getStoredBlobMeta(key);
- _aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
+ aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
ReadableBlobMeta rbm = new ReadableBlobMeta();
rbm.set_settable(meta);
try {
@@ -298,9 +299,7 @@
}
/**
- * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi
- *
- * @param leaderElector
+ * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi.
*/
@Override
public void setLeaderElector(ILeaderElector leaderElector) {
@@ -311,22 +310,22 @@
public void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException {
validateKey(key);
checkForBlobOrDownload(key);
- _aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
+ aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
SettableBlobMeta orig = getStoredBlobMeta(key);
- _aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
- BlobStoreFileOutputStream mOut = null;
+ aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
+ BlobStoreFileOutputStream outputStream = null;
try {
- mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, false));
- mOut.write(Utils.thriftSerialize(meta));
- mOut.close();
- mOut = null;
+ outputStream = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, false));
+ outputStream.write(Utils.thriftSerialize(meta));
+ outputStream.close();
+ outputStream = null;
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
- if (mOut != null) {
+ if (outputStream != null) {
try {
- mOut.cancel();
+ outputStream.cancel();
} catch (IOException e) {
//Ignored
}
@@ -338,7 +337,7 @@
public void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
validateKey(key);
- if (!_aclHandler.checkForValidUsers(who, WRITE)) {
+ if (!aclHandler.checkForValidUsers(who, WRITE)) {
// need to get ACL from meta
LOG.debug("Retrieving meta to get ACL info... key: {} subject: {}", key, who);
@@ -368,7 +367,7 @@
private void checkPermission(String key, Subject who, int mask) throws KeyNotFoundException, AuthorizationException {
checkForBlobOrDownload(key);
SettableBlobMeta meta = getStoredBlobMeta(key);
- _aclHandler.hasPermissions(meta.get_acl(), mask, who, key);
+ aclHandler.hasPermissions(meta.get_acl(), mask, who, key);
}
private void deleteKeyIgnoringFileNotFound(String key) throws IOException {
@@ -390,7 +389,7 @@
checkForBlobUpdate(key);
}
SettableBlobMeta meta = getStoredBlobMeta(key);
- _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
+ aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
try {
return new BlobStoreFileInputStream(fbs.read(DATA_PREFIX + key));
} catch (IOException e) {
@@ -423,7 +422,7 @@
int replicationCount = 0;
validateKey(key);
SettableBlobMeta meta = getStoredBlobMeta(key);
- _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
+ aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + key) == null) {
return 0;
}
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
index d160ba6..c4f3164 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
@@ -25,60 +25,60 @@
public class LocalFsBlobStoreFile extends BlobStoreFile {
- private final String _key;
- private final boolean _isTmp;
- private final File _path;
- private final boolean _mustBeNew;
- private Long _modTime = null;
+ private final String key;
+ private final boolean isTmp;
+ private final File path;
+ private final boolean mustBeNew;
+ private Long modTime = null;
private SettableBlobMeta meta;
public LocalFsBlobStoreFile(File base, String name) {
if (BlobStoreFile.BLOBSTORE_DATA_FILE.equals(name)) {
- _isTmp = false;
+ isTmp = false;
} else {
Matcher m = TMP_NAME_PATTERN.matcher(name);
if (!m.matches()) {
throw new IllegalArgumentException("File name does not match '" + name + "' !~ " + TMP_NAME_PATTERN);
}
- _isTmp = true;
+ isTmp = true;
}
- _key = base.getName();
- _path = new File(base, name);
- _mustBeNew = false;
+ key = base.getName();
+ path = new File(base, name);
+ mustBeNew = false;
}
public LocalFsBlobStoreFile(File base, boolean isTmp, boolean mustBeNew) {
- _key = base.getName();
- _isTmp = isTmp;
- _mustBeNew = mustBeNew;
- if (_isTmp) {
- _path = new File(base, System.currentTimeMillis() + TMP_EXT);
+ key = base.getName();
+ this.isTmp = isTmp;
+ this.mustBeNew = mustBeNew;
+ if (this.isTmp) {
+ path = new File(base, System.currentTimeMillis() + TMP_EXT);
} else {
- _path = new File(base, BlobStoreFile.BLOBSTORE_DATA_FILE);
+ path = new File(base, BlobStoreFile.BLOBSTORE_DATA_FILE);
}
}
@Override
public void delete() throws IOException {
- _path.delete();
+ path.delete();
}
@Override
public boolean isTmp() {
- return _isTmp;
+ return isTmp;
}
@Override
public String getKey() {
- return _key;
+ return key;
}
@Override
public long getModTime() throws IOException {
- if (_modTime == null) {
- _modTime = _path.lastModified();
+ if (modTime == null) {
+ modTime = path.lastModified();
}
- return _modTime;
+ return modTime;
}
@Override
@@ -86,7 +86,7 @@
if (isTmp()) {
throw new IllegalStateException("Cannot read from a temporary part file.");
}
- return new FileInputStream(_path);
+ return new FileInputStream(path);
}
@Override
@@ -96,16 +96,16 @@
}
boolean success = false;
try {
- success = _path.createNewFile();
+ success = path.createNewFile();
} catch (IOException e) {
//Try to create the parent directory, may not work
- _path.getParentFile().mkdirs();
- success = _path.createNewFile();
+ path.getParentFile().mkdirs();
+ success = path.createNewFile();
}
if (!success) {
- throw new IOException(_path + " already exists");
+ throw new IOException(path + " already exists");
}
- return new FileOutputStream(_path);
+ return new FileOutputStream(path);
}
@Override
@@ -114,11 +114,11 @@
throw new IllegalStateException("Can only write to a temporary part file.");
}
- File dest = new File(_path.getParentFile(), BlobStoreFile.BLOBSTORE_DATA_FILE);
- if (_mustBeNew) {
- Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE);
+ File dest = new File(path.getParentFile(), BlobStoreFile.BLOBSTORE_DATA_FILE);
+ if (mustBeNew) {
+ Files.move(path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE);
} else {
- Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
+ Files.move(path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
}
}
@@ -142,12 +142,12 @@
@Override
public String toString() {
- return _path + ":" + (_isTmp ? "tmp" : BlobStoreFile.BLOBSTORE_DATA_FILE) + ":" + _key;
+ return path + ":" + (isTmp ? "tmp" : BlobStoreFile.BLOBSTORE_DATA_FILE) + ":" + key;
}
@Override
public long getFileLength() {
- return _path.length();
+ return path.length();
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java b/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
index 8e2ae3c..89df29a 100644
--- a/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
+++ b/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
@@ -23,7 +23,7 @@
public interface ResourceIsolationInterface {
/**
- * Called when starting up
+ * Called when starting up.
*
* @param conf the cluster config
* @throws IOException on any error.
@@ -85,6 +85,7 @@
long getMemoryUsage(String workerId) throws IOException;
/**
+ * Get the system free memory in MB.
* @return The amount of memory in bytes that are free on the system. This might not be the entire box, it might be
* within a parent resource group.
* @throws IOException on any error.
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
index e11e363..c2395b1 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
@@ -26,42 +26,42 @@
public class BlockingOutstandingRequest extends OutstandingRequest {
public static final RequestFactory<BlockingOutstandingRequest> FACTORY = BlockingOutstandingRequest::new;
- private Semaphore _sem;
- private volatile String _result = null;
- private volatile DRPCExecutionException _e = null;
+ private Semaphore sem;
+ private volatile String result = null;
+ private volatile DRPCExecutionException drpcExecutionException = null;
public BlockingOutstandingRequest(String function, DRPCRequest req) {
super(function, req);
- _sem = new Semaphore(0);
+ sem = new Semaphore(0);
}
public String getResult() throws DRPCExecutionException {
try {
- _sem.acquire();
+ sem.acquire();
} catch (InterruptedException e) {
//Ignored
}
- if (_result != null) {
- return _result;
+ if (result != null) {
+ return result;
}
- if (_e == null) {
- _e = new WrappedDRPCExecutionException("Internal Error: No Result and No Exception");
- _e.set_type(DRPCExceptionType.INTERNAL_ERROR);
+ if (drpcExecutionException == null) {
+ drpcExecutionException = new WrappedDRPCExecutionException("Internal Error: No Result and No Exception");
+ drpcExecutionException.set_type(DRPCExceptionType.INTERNAL_ERROR);
}
- throw _e;
+ throw drpcExecutionException;
}
@Override
public void returnResult(String result) {
- _result = result;
- _sem.release();
+ this.result = result;
+ sem.release();
}
@Override
public void fail(DRPCExecutionException e) {
- _e = e;
- _sem.release();
+ drpcExecutionException = e;
+ sem.release();
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
index 2d853f3..23183f0 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
@@ -46,6 +46,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public class DRPC implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DRPC.class);
private static final DRPCRequest NOTHING_REQUEST = new DRPCRequest("", "");
@@ -66,14 +67,14 @@
}
//Waiting to be fetched
- private final ConcurrentHashMap<String, ConcurrentLinkedQueue<OutstandingRequest>> _queues =
- new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<String, ConcurrentLinkedQueue<OutstandingRequest>> queues =
+ new ConcurrentHashMap<>();
//Waiting to be returned
- private final ConcurrentHashMap<String, OutstandingRequest> _requests =
- new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<String, OutstandingRequest> requests =
+ new ConcurrentHashMap<>();
private final Timer timer = new Timer("DRPC-CLEANUP-TIMER", true);
- private final AtomicLong _ctr = new AtomicLong(0);
- private final IAuthorizer _auth;
+ private final AtomicLong ctr = new AtomicLong(0);
+ private final IAuthorizer auth;
public DRPC(StormMetricsRegistry metricsRegistry, Map<String, Object> conf) {
this(metricsRegistry, mkAuthorizationHandler((String) conf.get(DaemonConfig.DRPC_AUTHORIZER), conf),
@@ -81,7 +82,7 @@
}
public DRPC(StormMetricsRegistry metricsRegistry, IAuthorizer auth, long timeoutMs) {
- _auth = auth;
+ this.auth = auth;
this.meterServerTimedOut = metricsRegistry.registerMeter("drpc:num-server-timedout-requests");
this.meterExecuteCalls = metricsRegistry.registerMeter("drpc:num-execute-calls");
this.meterResultCalls = metricsRegistry.registerMeter("drpc:num-result-calls");
@@ -135,22 +136,22 @@
}
private void checkAuthorization(String operation, String function) throws AuthorizationException {
- checkAuthorization(ReqContext.context(), _auth, operation, function);
+ checkAuthorization(ReqContext.context(), auth, operation, function);
}
private void checkAuthorizationNoLog(String operation, String function) throws AuthorizationException {
- checkAuthorization(ReqContext.context(), _auth, operation, function, false);
+ checkAuthorization(ReqContext.context(), auth, operation, function, false);
}
private void cleanup(String id) {
- OutstandingRequest req = _requests.remove(id);
+ OutstandingRequest req = requests.remove(id);
if (req != null && !req.wasFetched()) {
- _queues.get(req.getFunction()).remove(req);
+ queues.get(req.getFunction()).remove(req);
}
}
private void cleanupAll(long timeoutMs, DRPCExecutionException exp) {
- for (Entry<String, OutstandingRequest> e : _requests.entrySet()) {
+ for (Entry<String, OutstandingRequest> e : requests.entrySet()) {
OutstandingRequest req = e.getValue();
if (req.isTimedOut(timeoutMs)) {
req.fail(exp);
@@ -161,17 +162,17 @@
}
private String nextId() {
- return String.valueOf(_ctr.incrementAndGet());
+ return String.valueOf(ctr.incrementAndGet());
}
private ConcurrentLinkedQueue<OutstandingRequest> getQueue(String function) {
if (function == null) {
throw new IllegalArgumentException("The function for a request cannot be null");
}
- ConcurrentLinkedQueue<OutstandingRequest> queue = _queues.get(function);
+ ConcurrentLinkedQueue<OutstandingRequest> queue = queues.get(function);
if (queue == null) {
- _queues.putIfAbsent(function, new ConcurrentLinkedQueue<>());
- queue = _queues.get(function);
+ queues.putIfAbsent(function, new ConcurrentLinkedQueue<>());
+ queue = queues.get(function);
}
return queue;
}
@@ -179,7 +180,7 @@
public void returnResult(String id, String result) throws AuthorizationException {
meterResultCalls.mark();
LOG.debug("Got a result {} {}", id, result);
- OutstandingRequest req = _requests.get(id);
+ OutstandingRequest req = requests.get(id);
if (req != null) {
checkAuthorization("result", req.getFunction());
req.returnResult(result);
@@ -204,7 +205,7 @@
public void failRequest(String id, DRPCExecutionException e) throws AuthorizationException {
meterFailRequestCalls.mark();
LOG.debug("Got a fail {}", id);
- OutstandingRequest req = _requests.get(id);
+ OutstandingRequest req = requests.get(id);
if (req != null) {
checkAuthorization("failRequest", req.getFunction());
if (e == null) {
@@ -221,7 +222,7 @@
String id = nextId();
LOG.debug("Execute {} {}", functionName, funcArgs);
T req = factory.mkRequest(functionName, new DRPCRequest(funcArgs, id));
- _requests.put(id, req);
+ requests.put(id, req);
ConcurrentLinkedQueue<OutstandingRequest> q = getQueue(functionName);
q.add(req);
return req;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
index 01d2392..f14f0ab 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
@@ -24,36 +24,37 @@
import org.apache.storm.generated.DistributedRPC;
import org.apache.storm.generated.DistributedRPCInvocations;
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public class DRPCThrift implements DistributedRPC.Iface, DistributedRPCInvocations.Iface {
- private final DRPC _drpc;
+ private final DRPC drpc;
public DRPCThrift(DRPC drpc) {
- _drpc = drpc;
+ this.drpc = drpc;
}
@Override
public void result(String id, String result) throws AuthorizationException {
- _drpc.returnResult(id, result);
+ drpc.returnResult(id, result);
}
@Override
public DRPCRequest fetchRequest(String functionName) throws AuthorizationException {
- return _drpc.fetchRequest(functionName);
+ return drpc.fetchRequest(functionName);
}
@Override
public void failRequest(String id) throws AuthorizationException {
- _drpc.failRequest(id, null);
+ drpc.failRequest(id, null);
}
@Override
public void failRequestV2(String id, DRPCExecutionException e) throws AuthorizationException {
- _drpc.failRequest(id, e);
+ drpc.failRequest(id, e);
}
@Override
public String execute(String functionName, String funcArgs)
throws DRPCExecutionException, AuthorizationException {
- return _drpc.executeBlocking(functionName, funcArgs);
+ return drpc.executeBlocking(functionName, funcArgs);
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
index 06c596e..17a3985 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
@@ -23,35 +23,35 @@
import org.apache.storm.utils.Time;
public abstract class OutstandingRequest {
- private final long _start;
- private final String _function;
- private final DRPCRequest _req;
- private volatile boolean _fetched = false;
+ private final long start;
+ private final String function;
+ private final DRPCRequest req;
+ private volatile boolean fetched = false;
public OutstandingRequest(String function, DRPCRequest req) {
- _start = Time.currentTimeMillis();
- _function = function;
- _req = req;
+ start = Time.currentTimeMillis();
+ this.function = function;
+ this.req = req;
}
public DRPCRequest getRequest() {
- return _req;
+ return req;
}
public void fetched() {
- _fetched = true;
+ fetched = true;
}
public boolean wasFetched() {
- return _fetched;
+ return fetched;
}
public String getFunction() {
- return _function;
+ return function;
}
public boolean isTimedOut(long timeoutMs) {
- return (_start + timeoutMs) <= Time.currentTimeMillis();
+ return (start + timeoutMs) <= Time.currentTimeMillis();
}
public abstract void returnResult(String result);
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
index e6cd799..d847417 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
@@ -21,5 +21,6 @@
import org.apache.storm.generated.DRPCRequest;
public interface RequestFactory<T extends OutstandingRequest> {
- public T mkRequest(String function, DRPCRequest req);
+
+ T mkRequest(String function, DRPCRequest req);
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
index c030c64..7b4a59f 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
@@ -672,17 +672,17 @@
@SuppressWarnings("deprecation")
private static <T extends AutoCloseable> TimeCacheMap<String, T> makeBlobCacheMap(Map<String, Object> conf) {
return new TimeCacheMap<>(ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_BLOBSTORE_EXPIRATION_SECS), 600),
- (id, stream) -> {
- try {
- if (stream instanceof AtomicOutputStream) {
- ((AtomicOutputStream) stream).cancel();
- } else {
- stream.close();
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
+ (id, stream) -> {
+ try {
+ if (stream instanceof AtomicOutputStream) {
+ ((AtomicOutputStream) stream).cancel();
+ } else {
+ stream.close();
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
}
/**
@@ -1009,7 +1009,7 @@
* ignored. The delay is to prevent a race conditions such as when a blobstore is created and when the topology
* is submitted. It is possible the Nimbus cleanup timer task will find entries to delete between these two events.
*
- * Tracked topology entries are rotated out of the stored map periodically.
+ * <p>Tracked topology entries are rotated out of the stored map periodically.
*
* @param toposToClean topologies considered for cleanup
* @param conf the nimbus conf
@@ -1168,7 +1168,7 @@
private static void validateTopologySize(Map<String, Object> topoConf, Map<String, Object> nimbusConf,
StormTopology topology) throws InvalidTopologyException {
// check allowedWorkers only if the scheduler is not the Resource Aware Scheduler
- if (!ServerUtils.isRAS(nimbusConf)) {
+ if (!ServerUtils.isRas(nimbusConf)) {
int workerCount = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 1);
Integer allowedWorkers = ObjectReader.getInt(nimbusConf.get(DaemonConfig.NIMBUS_SLOTS_PER_TOPOLOGY), null);
if (allowedWorkers != null && workerCount > allowedWorkers) {
@@ -1245,8 +1245,6 @@
private static Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> extractSupervisorMetrics(ClusterSummary summ) {
Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> ret = new HashMap<>();
for (SupervisorSummary sup : summ.get_supervisors()) {
- IClusterMetricsConsumer.SupervisorInfo info =
- new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
List<DataPoint> metrics = new ArrayList<>();
metrics.add(new DataPoint("slotsTotal", sup.get_num_workers()));
metrics.add(new DataPoint("slotsUsed", sup.get_num_used_workers()));
@@ -1254,6 +1252,8 @@
metrics.add(new DataPoint("totalCpu", sup.get_total_resources().get(Constants.COMMON_CPU_RESOURCE_NAME)));
metrics.add(new DataPoint("usedMem", sup.get_used_mem()));
metrics.add(new DataPoint("usedCpu", sup.get_used_cpu()));
+ IClusterMetricsConsumer.SupervisorInfo info =
+ new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
ret.put(info, metrics);
}
return ret;
@@ -1277,6 +1277,132 @@
}
}
+ @VisibleForTesting
+ public void launchServer() throws Exception {
+ try {
+ IStormClusterState state = stormClusterState;
+ NimbusInfo hpi = nimbusHostPortInfo;
+
+ LOG.info("Starting Nimbus with conf {}", ConfigUtils.maskPasswords(conf));
+ validator.prepare(conf);
+
+ //add to nimbuses
+ state.addNimbusHost(hpi.getHost(),
+ new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
+ leaderElector.addToLeaderLockQueue();
+ this.blobStore.startSyncBlobs();
+
+ for (ClusterMetricsConsumerExecutor exec: clusterConsumerExceutors) {
+ exec.prepare();
+ }
+
+ if (isLeader()) {
+ for (String topoId : state.activeStorms()) {
+ transition(topoId, TopologyActions.STARTUP, null);
+ }
+ clusterMetricSet.setActive(true);
+ }
+
+ final boolean doNotReassign = (Boolean) conf.getOrDefault(ServerConfigUtils.NIMBUS_DO_NOT_REASSIGN, false);
+ timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS)),
+ () -> {
+ try {
+ if (!doNotReassign) {
+ mkAssignments();
+ }
+ doCleanup();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+ // Schedule Nimbus inbox cleaner
+ final int jarExpSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_INBOX_JAR_EXPIRATION_SECS));
+ timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CLEANUP_INBOX_FREQ_SECS)),
+ () -> {
+ try {
+ cleanInbox(getInbox(), jarExpSecs);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+
+ // Schedule topology history cleaner
+ Integer interval = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_INTERVAL_SECS), null);
+ if (interval != null) {
+ final int lvCleanupAgeMins = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_AGE_MINS));
+ timer.scheduleRecurring(0, interval,
+ () -> {
+ try {
+ cleanTopologyHistory(lvCleanupAgeMins);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CREDENTIAL_RENEW_FREQ_SECS)),
+ () -> {
+ try {
+ renewCredentials();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+ metricsRegistry.registerGauge("nimbus:total-available-memory-non-negative", () -> nodeIdToResources.get().values()
+ .parallelStream()
+ .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableMem(), 0))
+ .sum());
+ metricsRegistry.registerGauge("nimbus:available-cpu-non-negative", () -> nodeIdToResources.get().values()
+ .parallelStream()
+ .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableCpu(), 0))
+ .sum());
+ metricsRegistry.registerGauge("nimbus:total-memory", () -> nodeIdToResources.get().values()
+ .parallelStream()
+ .mapToDouble(SupervisorResources::getTotalMem)
+ .sum());
+ metricsRegistry.registerGauge("nimbus:total-cpu", () -> nodeIdToResources.get().values()
+ .parallelStream()
+ .mapToDouble(SupervisorResources::getTotalCpu)
+ .sum());
+ metricsRegistry.registerGauge("nimbus:longest-scheduling-time-ms", () -> {
+            //We want to update longest scheduling time in real time in case the scheduler gets stuck
+ // Get current time before startTime to avoid potential race with scheduler's Timer
+ Long currTime = Time.nanoTime();
+ Long startTime = schedulingStartTimeNs.get();
+ return TimeUnit.NANOSECONDS.toMillis(startTime == null
+ ? longestSchedulingTime.get()
+ : Math.max(currTime - startTime, longestSchedulingTime.get()));
+ });
+ metricsRegistry.registerMeter("nimbus:num-launched").mark();
+
+ timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.STORM_CLUSTER_METRICS_CONSUMER_PUBLISH_INTERVAL_SECS)),
+ () -> {
+ try {
+ if (isLeader()) {
+ sendClusterMetricsToExecutors();
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+
+ timer.scheduleRecurring(5, 5, clusterMetricSet);
+ } catch (Exception e) {
+ if (Utils.exceptionCauseIsInstanceOf(InterruptedException.class, e)) {
+ throw e;
+ }
+
+ if (Utils.exceptionCauseIsInstanceOf(InterruptedIOException.class, e)) {
+ throw e;
+ }
+ LOG.error("Error on initialization of nimbus", e);
+ Utils.exitProcess(13, "Error on initialization of nimbus");
+ }
+ }
+
private static Nimbus launchServer(Map<String, Object> conf, INimbus inimbus) throws Exception {
StormCommon.validateDistributedMode(conf);
validatePortAvailable(conf);
@@ -1314,6 +1440,7 @@
launch(new StandaloneINimbus());
}
+ @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
private static CuratorFramework makeZKClient(Map<String, Object> conf) {
List<String> servers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
@@ -1431,10 +1558,10 @@
@VisibleForTesting
static void validateTopologyWorkerMaxHeapSizeConfigs(
- Map<String, Object> stormConf, StormTopology topology, double defaultWorkerMaxHeapSizeMB) {
+ Map<String, Object> stormConf, StormTopology topology, double defaultWorkerMaxHeapSizeMb) {
double largestMemReq = getMaxExecutorMemoryUsageForTopo(topology, stormConf);
double topologyWorkerMaxHeapSize =
- ObjectReader.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeapSizeMB);
+ ObjectReader.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeapSizeMb);
if (topologyWorkerMaxHeapSize < largestMemReq) {
throw new IllegalArgumentException(
"Topology will not be able to be successfully scheduled: Config "
@@ -2013,10 +2140,10 @@
private boolean isFragmented(SupervisorResources supervisorResources) {
double minMemory = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), 256.0)
+ ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB), 128.0);
- double minCPU = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), 50.0)
+ double minCpu = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), 50.0)
+ ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT), 50.0);
- return minMemory > supervisorResources.getAvailableMem() || minCPU > supervisorResources.getAvailableCpu();
+ return minMemory > supervisorResources.getAvailableMem() || minCpu > supervisorResources.getAvailableCpu();
}
private double fragmentedMemory() {
@@ -2148,6 +2275,7 @@
return ret;
}
+ @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
private boolean isReadyForMKAssignments() throws Exception {
if (isLeader()) {
if (isHeartbeatsRecovered()) {
@@ -2384,7 +2512,6 @@
String principal, Map<String, Object> topoConf, StormTopology stormTopology)
throws KeyNotFoundException, AuthorizationException, IOException, InvalidTopologyException {
assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
- IStormClusterState state = stormClusterState;
Map<String, Integer> numExecutors = new HashMap<>();
StormTopology topology = StormCommon.systemTopology(topoConf, stormTopology);
for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
@@ -2403,6 +2530,7 @@
base.set_owner(owner);
base.set_principal(principal);
base.set_component_debug(new HashMap<>());
+ IStormClusterState state = stormClusterState;
state.activateStorm(topoId, base, topoConf);
idToExecutors.getAndUpdate(new Assoc<>(topoId,
new HashSet<>(computeExecutors(topoId, base, topoConf, stormTopology))));
@@ -2434,8 +2562,7 @@
@VisibleForTesting
public void checkAuthorization(String topoName, Map<String, Object> topoConf, String operation, ReqContext context)
- throws AuthorizationException {
- IAuthorizer aclHandler = authorizationHandler;
+ throws AuthorizationException {
IAuthorizer impersonationAuthorizer = impersonationAuthorizationHandler;
if (context == null) {
context = ReqContext.context();
@@ -2465,6 +2592,7 @@
}
}
+ IAuthorizer aclHandler = authorizationHandler;
if (aclHandler != null) {
if (!aclHandler.permit(context, operation, checkConf)) {
ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(), operation,
@@ -2821,8 +2949,7 @@
}
private CommonTopoInfo getCommonTopoInfo(String topoId, String operation) throws NotAliveException,
- AuthorizationException, IOException, InvalidTopologyException {
- IStormClusterState state = stormClusterState;
+ AuthorizationException, IOException, InvalidTopologyException {
CommonTopoInfo ret = new CommonTopoInfo();
ret.topoConf = tryReadTopoConf(topoId, topoCache);
ret.topoName = (String) ret.topoConf.get(Config.TOPOLOGY_NAME);
@@ -2830,6 +2957,7 @@
StormTopology topology = tryReadTopology(topoId, topoCache);
ret.topology = StormCommon.systemTopology(ret.topoConf, topology);
ret.taskToComponent = StormCommon.stormTaskInfo(topology, ret.topoConf);
+ IStormClusterState state = stormClusterState;
ret.base = state.stormBase(topoId, null);
if (ret.base != null && ret.base.is_set_launch_time_secs()) {
ret.launchTimeSecs = ret.base.get_launch_time_secs();
@@ -2846,131 +2974,6 @@
ret.allComponents = new HashSet<>(ret.taskToComponent.values());
return ret;
}
-
- @VisibleForTesting
- public void launchServer() throws Exception {
- try {
- IStormClusterState state = stormClusterState;
- NimbusInfo hpi = nimbusHostPortInfo;
-
- LOG.info("Starting Nimbus with conf {}", ConfigUtils.maskPasswords(conf));
- validator.prepare(conf);
-
- //add to nimbuses
- state.addNimbusHost(hpi.getHost(),
- new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
- leaderElector.addToLeaderLockQueue();
- this.blobStore.startSyncBlobs();
-
- for (ClusterMetricsConsumerExecutor exec: clusterConsumerExceutors) {
- exec.prepare();
- }
-
- if (isLeader()) {
- for (String topoId : state.activeStorms()) {
- transition(topoId, TopologyActions.STARTUP, null);
- }
- clusterMetricSet.setActive(true);
- }
-
- final boolean doNotReassign = (Boolean) conf.getOrDefault(ServerConfigUtils.NIMBUS_DO_NOT_REASSIGN, false);
- timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS)),
- () -> {
- try {
- if (!doNotReassign) {
- mkAssignments();
- }
- doCleanup();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
-
- // Schedule Nimbus inbox cleaner
- final int jarExpSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_INBOX_JAR_EXPIRATION_SECS));
- timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CLEANUP_INBOX_FREQ_SECS)),
- () -> {
- try {
- cleanInbox(getInbox(), jarExpSecs);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
-
-
- // Schedule topology history cleaner
- Integer interval = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_INTERVAL_SECS), null);
- if (interval != null) {
- final int lvCleanupAgeMins = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_AGE_MINS));
- timer.scheduleRecurring(0, interval,
- () -> {
- try {
- cleanTopologyHistory(lvCleanupAgeMins);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
- }
-
- timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CREDENTIAL_RENEW_FREQ_SECS)),
- () -> {
- try {
- renewCredentials();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
-
- metricsRegistry.registerGauge("nimbus:total-available-memory-non-negative", () -> nodeIdToResources.get().values()
- .parallelStream()
- .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableMem(), 0))
- .sum());
- metricsRegistry.registerGauge("nimbus:available-cpu-non-negative", () -> nodeIdToResources.get().values()
- .parallelStream()
- .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableCpu(), 0))
- .sum());
- metricsRegistry.registerGauge("nimbus:total-memory", () -> nodeIdToResources.get().values()
- .parallelStream()
- .mapToDouble(SupervisorResources::getTotalMem)
- .sum());
- metricsRegistry.registerGauge("nimbus:total-cpu", () -> nodeIdToResources.get().values()
- .parallelStream()
- .mapToDouble(SupervisorResources::getTotalCpu)
- .sum());
- metricsRegistry.registerGauge("nimbus:longest-scheduling-time-ms", () -> {
- //We want to update longest scheduling time in real time in case scheduler get stuck
- // Get current time before startTime to avoid potential race with scheduler's Timer
- Long currTime = Time.nanoTime();
- Long startTime = schedulingStartTimeNs.get();
- return TimeUnit.NANOSECONDS.toMillis(startTime == null ?
- longestSchedulingTime.get() : Math.max(currTime - startTime, longestSchedulingTime.get()));
- });
- metricsRegistry.registerMeter("nimbus:num-launched").mark();
-
- timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.STORM_CLUSTER_METRICS_CONSUMER_PUBLISH_INTERVAL_SECS)),
- () -> {
- try {
- if (isLeader()) {
- sendClusterMetricsToExecutors();
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
-
- timer.scheduleRecurring(5, 5, clusterMetricSet);
- } catch (Exception e) {
- if (Utils.exceptionCauseIsInstanceOf(InterruptedException.class, e)) {
- throw e;
- }
-
- if (Utils.exceptionCauseIsInstanceOf(InterruptedIOException.class, e)) {
- throw e;
- }
- LOG.error("Error on initialization of nimbus", e);
- Utils.exitProcess(13, "Error on initialization of nimbus");
- }
- }
@VisibleForTesting
public boolean awaitLeadership(long timeout, TimeUnit timeUnit) throws InterruptedException {
@@ -3035,15 +3038,15 @@
ReqContext req = ReqContext.context();
Principal principal = req.principal();
String submitterPrincipal = principal == null ? null : principal.toString();
- String submitterUser = principalToLocal.toLocal(principal);
- String systemUser = System.getProperty("user.name");
@SuppressWarnings("unchecked")
Set<String> topoAcl = new HashSet<>((List<String>) topoConf.getOrDefault(Config.TOPOLOGY_USERS, Collections.emptyList()));
topoAcl.add(submitterPrincipal);
+ String submitterUser = principalToLocal.toLocal(principal);
topoAcl.add(submitterUser);
String topologyPrincipal = Utils.OR(submitterPrincipal, "");
topoConf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, topologyPrincipal);
+ String systemUser = System.getProperty("user.name");
String topologyOwner = Utils.OR(submitterUser, systemUser);
topoConf.put(Config.TOPOLOGY_SUBMITTER_USER, topologyOwner); //Don't let the user set who we launch as
topoConf.put(Config.TOPOLOGY_USERS, new ArrayList<>(topoAcl));
@@ -3078,8 +3081,8 @@
// if the Resource Aware Scheduler is used,
// we might need to set the number of acker executors and eventlogger executors to be the estimated number of workers.
- if (ServerUtils.isRAS(conf)) {
- int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRASTopo(totalConf, topology);
+ if (ServerUtils.isRas(conf)) {
+ int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(totalConf, topology);
int numAckerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_ACKER_EXECUTORS), estimatedNumWorker);
int numEventLoggerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS), estimatedNumWorker);
@@ -3374,7 +3377,6 @@
if (topoId == null) {
throw new WrappedNotAliveException(topoName);
}
- boolean hasCompId = componentId != null && !componentId.isEmpty();
DebugOptions options = new DebugOptions();
options.set_enable(enable);
@@ -3384,6 +3386,7 @@
StormBase updates = new StormBase();
//For backwards compatability
updates.set_component_executors(Collections.emptyMap());
+ boolean hasCompId = componentId != null && !componentId.isEmpty();
String key = hasCompId ? componentId : topoId;
updates.put_to_component_debug(key, options);
@@ -4031,12 +4034,10 @@
CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
String topoName = common.topoName;
IStormClusterState state = stormClusterState;
- int launchTimeSecs = common.launchTimeSecs;
Assignment assignment = common.assignment;
Map<List<Integer>, Map<String, Object>> beats = common.beats;
Map<Integer, String> taskToComp = common.taskToComponent;
StormTopology topology = common.topology;
- Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
StormBase base = common.base;
if (base == null) {
throw new WrappedNotAliveException(topoId);
@@ -4080,6 +4081,7 @@
topoPageInfo.set_storm_version(topology.get_storm_version());
}
+ Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
Map<String, NormalizedResourceRequest> spoutResources = ResourceUtils.getSpoutsResources(topology, topoConf);
for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_spout_agg_stats().entrySet()) {
CommonAggregateStats commonStats = entry.getValue().get_common_stats();
@@ -4128,6 +4130,7 @@
topoPageInfo.set_assigned_shared_on_heap_memory(resources.getAssignedSharedMemOnHeap());
topoPageInfo.set_assigned_regular_on_heap_memory(resources.getAssignedNonSharedMemOnHeap());
}
+ int launchTimeSecs = common.launchTimeSecs;
topoPageInfo.set_name(topoName);
topoPageInfo.set_status(extractStatusStr(base));
topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
@@ -4968,66 +4971,74 @@
}
};
- clusterSummaryMetrics.put("cluster:num-nimbus-leaders", new DerivativeGauge<ClusterSummary, Long>(cachedSummary) {
- @Override
- protected Long transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_nimbuses().stream()
- .filter(NimbusSummary::is_isLeader)
- .count();
- }
- });
- clusterSummaryMetrics.put("cluster:num-nimbuses", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
- @Override
- protected Integer transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_nimbuses_size();
- }
- });
- clusterSummaryMetrics.put("cluster:num-supervisors", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
- @Override
- protected Integer transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_supervisors_size();
- }
- });
- clusterSummaryMetrics.put("cluster:num-topologies", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
- @Override
- protected Integer transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_topologies_size();
- }
- });
- clusterSummaryMetrics.put("cluster:num-total-workers", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
- @Override
- protected Integer transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_supervisors().stream()
- .mapToInt(SupervisorSummary::get_num_workers)
- .sum();
- }
- });
- clusterSummaryMetrics.put("cluster:num-total-used-workers", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
- @Override
- protected Integer transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_supervisors().stream()
- .mapToInt(SupervisorSummary::get_num_used_workers)
- .sum();
- }
- });
- clusterSummaryMetrics.put("cluster:total-fragmented-memory-non-negative", new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
- @Override
- protected Double transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_supervisors().stream()
- //Filtered negative value
- .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0))
- .sum();
- }
- });
- clusterSummaryMetrics.put("cluster:total-fragmented-cpu-non-negative", new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
- @Override
- protected Double transform(ClusterSummary clusterSummary) {
- return clusterSummary.get_supervisors().stream()
- //Filtered negative value
- .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0))
- .sum();
- }
- });
+ clusterSummaryMetrics.put("cluster:num-nimbus-leaders",
+ new DerivativeGauge<ClusterSummary, Long>(cachedSummary) {
+ @Override
+ protected Long transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_nimbuses().stream()
+ .filter(NimbusSummary::is_isLeader)
+ .count();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:num-nimbuses",
+ new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+ @Override
+ protected Integer transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_nimbuses_size();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:num-supervisors",
+ new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+ @Override
+ protected Integer transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_supervisors_size();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:num-topologies",
+ new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+ @Override
+ protected Integer transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_topologies_size();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:num-total-workers",
+ new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+ @Override
+ protected Integer transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_supervisors().stream()
+ .mapToInt(SupervisorSummary::get_num_workers)
+ .sum();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:num-total-used-workers",
+ new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+ @Override
+ protected Integer transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_supervisors().stream()
+ .mapToInt(SupervisorSummary::get_num_used_workers)
+ .sum();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:total-fragmented-memory-non-negative",
+ new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
+ @Override
+ protected Double transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_supervisors().stream()
+                    // Filter out negative values
+ .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0))
+ .sum();
+ }
+ });
+ clusterSummaryMetrics.put("cluster:total-fragmented-cpu-non-negative",
+ new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
+ @Override
+ protected Double transform(ClusterSummary clusterSummary) {
+ return clusterSummary.get_supervisors().stream()
+                    // Filter out negative values
+ .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0))
+ .sum();
+ }
+ });
}
private void updateHistogram(ClusterSummary newSummary) {
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
index 0b0e70c..591a3b9 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
@@ -12,6 +12,8 @@
package org.apache.storm.daemon.nimbus;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
+
import java.io.IOException;
import java.util.List;
import java.util.Map;
@@ -31,8 +33,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
-
/**
* Cache topologies and topology confs from the blob store.
* Makes reading this faster because it can skip
@@ -44,6 +44,7 @@
private final BlobStoreAclHandler aclHandler;
private final ConcurrentHashMap<String, WithAcl<StormTopology>> topos = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, WithAcl<Map<String, Object>>> confs = new ConcurrentHashMap<>();
+
public TopoCache(BlobStore store, Map<String, Object> conf) {
this.store = store;
aclHandler = new BlobStoreAclHandler(conf);
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
index bc41da5..05f9996 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
@@ -13,7 +13,7 @@
package org.apache.storm.daemon.nimbus;
/**
- * Actions that can be done to a topology in nimbus
+ * Actions that can be done to a topology in nimbus.
*/
public enum TopologyActions {
STARTUP,
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
index a34292c..f0db842 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
@@ -36,6 +36,7 @@
private double assignedNonSharedMemOnHeap;
private double assignedNonSharedMemOffHeap;
private double assignedCpu;
+
private TopologyResources(TopologyDetails td, Collection<WorkerResources> workers,
Map<String, Double> nodeIdToSharedOffHeapNode) {
requestedMemOnHeap = td.getTotalRequestedMemOnHeap();
@@ -79,12 +80,15 @@
assignedMemOffHeap += sharedOff;
}
}
+
public TopologyResources(TopologyDetails td, SchedulerAssignment assignment) {
this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeapNode(assignment));
}
+
public TopologyResources(TopologyDetails td, Assignment assignment) {
this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeapNode(assignment));
}
+
public TopologyResources() {
this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
index 37019b9..5308df6 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
@@ -15,8 +15,9 @@
import org.apache.storm.generated.StormBase;
/**
- * A transition from one state to another
+ * A transition from one state to another.
*/
interface TopologyStateTransition {
+
StormBase transition(Object argument, Nimbus nimbus, String topoId, StormBase base) throws Exception;
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
index 1e61be6..2a61523 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
@@ -18,8 +18,10 @@
package org.apache.storm.daemon.supervisor;
+import static org.apache.storm.daemon.nimbus.Nimbus.MIN_VERSION_SUPPORT_RPC_HEARTBEAT;
+import static org.apache.storm.utils.Utils.OR;
+
import java.io.File;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -41,6 +43,7 @@
import org.apache.storm.generated.ProfileRequest;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.generated.WorkerResources;
+import org.apache.storm.metric.StormMetricsRegistry;
import org.apache.storm.shade.com.google.common.base.Joiner;
import org.apache.storm.shade.com.google.common.collect.Lists;
import org.apache.storm.utils.ConfigUtils;
@@ -55,30 +58,24 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.storm.daemon.nimbus.Nimbus.MIN_VERSION_SUPPORT_RPC_HEARTBEAT;
-import static org.apache.storm.utils.Utils.OR;
-
-import org.apache.storm.metric.StormMetricsRegistry;
/**
* A container that runs processes on the local box.
*/
public class BasicContainer extends Container {
- static final TopoMetaLRUCache TOPO_META_CACHE = new TopoMetaLRUCache();
+ static final TopoMetaLruCache TOPO_META_CACHE = new TopoMetaLruCache();
private static final Logger LOG = LoggerFactory.getLogger(BasicContainer.class);
- private static final FilenameFilter jarFilter = (dir, name) -> name.endsWith(".jar");
- private static final Joiner CPJ =
- Joiner.on(File.pathSeparator).skipNulls();
- protected final LocalState _localState;
- protected final String _profileCmd;
- protected final String _stormHome = System.getProperty(ConfigUtils.STORM_HOME);
+ private static final Joiner CPJ = Joiner.on(File.pathSeparator).skipNulls();
+ protected final LocalState localState;
+ protected final String profileCmd;
+ protected final String stormHome = System.getProperty(ConfigUtils.STORM_HOME);
protected final double hardMemoryLimitMultiplier;
protected final long hardMemoryLimitOver;
- protected final long lowMemoryThresholdMB;
+ protected final long lowMemoryThresholdMb;
protected final long mediumMemoryThresholdMb;
protected final long mediumMemoryGracePeriodMs;
- protected volatile boolean _exitedEarly = false;
- protected volatile long memoryLimitMB;
+ protected volatile boolean exitedEarly = false;
+ protected volatile long memoryLimitMb;
protected volatile long memoryLimitExceededStart;
/**
@@ -132,7 +129,7 @@
super(type, conf, supervisorId, supervisorPort, port, assignment,
resourceIsolationManager, workerId, topoConf, ops, metricsRegistry, containerMemoryTracker);
assert (localState != null);
- _localState = localState;
+ this.localState = localState;
if (type.isRecovery() && !type.isOnlyKillable()) {
synchronized (localState) {
@@ -147,23 +144,23 @@
throw new ContainerRecoveryException("Could not find worker id for " + port + " " + assignment);
}
LOG.info("Recovered Worker {}", wid);
- _workerId = wid;
+ this.workerId = wid;
}
- } else if (_workerId == null) {
+ } else if (this.workerId == null) {
createNewWorkerId();
}
if (profileCmd == null) {
- profileCmd = _stormHome + File.separator + "bin" + File.separator
+ profileCmd = stormHome + File.separator + "bin" + File.separator
+ conf.get(DaemonConfig.WORKER_PROFILER_COMMAND);
}
- _profileCmd = profileCmd;
+ this.profileCmd = profileCmd;
hardMemoryLimitMultiplier =
ObjectReader.getDouble(conf.get(DaemonConfig.STORM_SUPERVISOR_HARD_MEMORY_LIMIT_MULTIPLIER), 2.0);
hardMemoryLimitOver =
ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_HARD_LIMIT_MEMORY_OVERAGE_MB), 0);
- lowMemoryThresholdMB = ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_LOW_MEMORY_THRESHOLD_MB), 1024);
+ lowMemoryThresholdMb = ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_LOW_MEMORY_THRESHOLD_MB), 1024);
mediumMemoryThresholdMb =
ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_MEDIUM_MEMORY_THRESHOLD_MB), 1536);
mediumMemoryGracePeriodMs =
@@ -171,14 +168,14 @@
if (assignment != null) {
WorkerResources resources = assignment.get_resources();
- memoryLimitMB = calculateMemoryLimit(resources, getMemOnHeap(resources));
+ memoryLimitMb = calculateMemoryLimit(resources, getMemOnHeap(resources));
}
}
- private static void removeWorkersOn(Map<String, Integer> workerToPort, int _port) {
+ private static void removeWorkersOn(Map<String, Integer> workerToPort, int port) {
for (Iterator<Entry<String, Integer>> i = workerToPort.entrySet().iterator(); i.hasNext(); ) {
Entry<String, Integer> found = i.next();
- if (_port == found.getValue().intValue()) {
+ if (port == found.getValue().intValue()) {
LOG.warn("Deleting worker {} from state", found.getKey());
i.remove();
}
@@ -200,31 +197,31 @@
* up and running. We will lose track of the process.
*/
protected void createNewWorkerId() {
- _type.assertFull();
- assert (_workerId == null);
- synchronized (_localState) {
- _workerId = Utils.uuid();
- Map<String, Integer> workerToPort = _localState.getApprovedWorkers();
+ type.assertFull();
+ assert (workerId == null);
+ synchronized (localState) {
+ workerId = Utils.uuid();
+ Map<String, Integer> workerToPort = localState.getApprovedWorkers();
if (workerToPort == null) {
workerToPort = new HashMap<>(1);
}
- removeWorkersOn(workerToPort, _port);
- workerToPort.put(_workerId, _port);
- _localState.setApprovedWorkers(workerToPort);
- LOG.info("Created Worker ID {}", _workerId);
+ removeWorkersOn(workerToPort, port);
+ workerToPort.put(workerId, port);
+ localState.setApprovedWorkers(workerToPort);
+ LOG.info("Created Worker ID {}", workerId);
}
}
@Override
public void cleanUpForRestart() throws IOException {
- String origWorkerId = _workerId;
+ String origWorkerId = workerId;
super.cleanUpForRestart();
- synchronized (_localState) {
- Map<String, Integer> workersToPort = _localState.getApprovedWorkers();
+ synchronized (localState) {
+ Map<String, Integer> workersToPort = localState.getApprovedWorkers();
if (workersToPort != null) {
workersToPort.remove(origWorkerId);
- removeWorkersOn(workersToPort, _port);
- _localState.setApprovedWorkers(workersToPort);
+ removeWorkersOn(workersToPort, port);
+ localState.setApprovedWorkers(workersToPort);
LOG.info("Removed Worker ID {}", origWorkerId);
} else {
LOG.warn("No approved workers exists");
@@ -234,9 +231,9 @@
@Override
public void relaunch() throws IOException {
- _type.assertFull();
+ type.assertFull();
//We are launching it now...
- _type = ContainerType.LAUNCH;
+ type = ContainerType.LAUNCH;
createNewWorkerId();
setup();
launch();
@@ -244,7 +241,7 @@
@Override
public boolean didMainProcessExit() {
- return _exitedEarly;
+ return exitedEarly;
}
/**
@@ -261,7 +258,7 @@
*/
protected boolean runProfilingCommand(List<String> command, Map<String, String> env, String logPrefix,
File targetDir) throws IOException, InterruptedException {
- _type.assertFull();
+ type.assertFull();
Process p = ClientSupervisorUtils.launchProcess(command, env, logPrefix, null, targetDir);
int ret = p.waitFor();
return ret == 0;
@@ -269,21 +266,21 @@
@Override
public boolean runProfiling(ProfileRequest request, boolean stop) throws IOException, InterruptedException {
- _type.assertFull();
- String targetDir = ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port);
+ type.assertFull();
+ String targetDir = ConfigUtils.workerArtifactsRoot(conf, topologyId, port);
@SuppressWarnings("unchecked")
- Map<String, String> env = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
+ Map<String, String> env = (Map<String, String>) topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (env == null) {
env = new HashMap<>();
}
- String str = ConfigUtils.workerArtifactsPidPath(_conf, _topologyId, _port);
+ String str = ConfigUtils.workerArtifactsPidPath(conf, topologyId, port);
- String workerPid = _ops.slurpString(new File(str)).trim();
+ String workerPid = ops.slurpString(new File(str)).trim();
ProfileAction profileAction = request.get_action();
- String logPrefix = "ProfilerAction process " + _topologyId + ":" + _port + " PROFILER_ACTION: " + profileAction
+ String logPrefix = "ProfilerAction process " + topologyId + ":" + port + " PROFILER_ACTION: " + profileAction
+ " ";
List<String> command = mkProfileCommand(profileAction, stop, workerPid, targetDir);
@@ -326,27 +323,27 @@
}
private List<String> jmapDumpCmd(String pid, String targetDir) {
- return Lists.newArrayList(_profileCmd, pid, "jmap", targetDir);
+ return Lists.newArrayList(profileCmd, pid, "jmap", targetDir);
}
private List<String> jstackDumpCmd(String pid, String targetDir) {
- return Lists.newArrayList(_profileCmd, pid, "jstack", targetDir);
+ return Lists.newArrayList(profileCmd, pid, "jstack", targetDir);
}
private List<String> jprofileStart(String pid) {
- return Lists.newArrayList(_profileCmd, pid, "start");
+ return Lists.newArrayList(profileCmd, pid, "start");
}
private List<String> jprofileStop(String pid, String targetDir) {
- return Lists.newArrayList(_profileCmd, pid, "stop", targetDir);
+ return Lists.newArrayList(profileCmd, pid, "stop", targetDir);
}
private List<String> jprofileDump(String pid, String targetDir) {
- return Lists.newArrayList(_profileCmd, pid, "dump", targetDir);
+ return Lists.newArrayList(profileCmd, pid, "dump", targetDir);
}
private List<String> jprofileJvmRestart(String pid) {
- return Lists.newArrayList(_profileCmd, pid, "kill");
+ return Lists.newArrayList(profileCmd, pid, "kill");
}
/**
@@ -378,12 +375,11 @@
}
protected List<String> frameworkClasspath(SimpleVersion topoVersion) {
- File stormWorkerLibDir = new File(_stormHome, "lib-worker");
- String topoConfDir =
- System.getenv("STORM_CONF_DIR") != null ?
- System.getenv("STORM_CONF_DIR") :
- new File(_stormHome, "conf").getAbsolutePath();
- File stormExtlibDir = new File(_stormHome, "extlib");
+ File stormWorkerLibDir = new File(stormHome, "lib-worker");
+ String topoConfDir = System.getenv("STORM_CONF_DIR") != null
+ ? System.getenv("STORM_CONF_DIR")
+ : new File(stormHome, "conf").getAbsolutePath();
+ File stormExtlibDir = new File(stormHome, "extlib");
String extcp = System.getenv("STORM_EXT_CLASSPATH");
List<String> pathElements = new LinkedList<>();
pathElements.add(getWildcardDir(stormWorkerLibDir));
@@ -391,7 +387,7 @@
pathElements.add(extcp);
pathElements.add(topoConfDir);
- NavigableMap<SimpleVersion, List<String>> classpaths = Utils.getConfiguredClasspathVersions(_conf, pathElements);
+ NavigableMap<SimpleVersion, List<String>> classpaths = Utils.getConfiguredClasspathVersions(conf, pathElements);
return Utils.getCompatibleVersion(classpaths, topoVersion, "classpath", pathElements);
}
@@ -405,7 +401,7 @@
//Have not moved to a java worker yet
defaultWorkerGuess = "org.apache.storm.daemon.worker";
}
- NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerMainVersions(_conf);
+ NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerMainVersions(conf);
return Utils.getCompatibleVersion(mains, topoVersion, "worker main class", defaultWorkerGuess);
}
@@ -415,7 +411,7 @@
//Prior to the org.apache change
defaultGuess = "backtype.storm.LogWriter";
}
- NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerLogWriterVersions(_conf);
+ NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerLogWriterVersions(conf);
return Utils.getCompatibleVersion(mains, topoVersion, "worker log writer class", defaultGuess);
}
@@ -439,26 +435,26 @@
*/
protected String getWorkerClassPath(String stormJar, List<String> dependencyLocations, SimpleVersion topoVersion) {
List<String> workercp = new ArrayList<>();
- workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH_BEGINNING)));
+ workercp.addAll(asStringList(topoConf.get(Config.TOPOLOGY_CLASSPATH_BEGINNING)));
workercp.addAll(frameworkClasspath(topoVersion));
workercp.add(stormJar);
workercp.addAll(dependencyLocations);
- workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH)));
+ workercp.addAll(asStringList(topoConf.get(Config.TOPOLOGY_CLASSPATH)));
return CPJ.join(workercp);
}
private String substituteChildOptsInternal(String string, int memOnheap) {
if (StringUtils.isNotBlank(string)) {
- String p = String.valueOf(_port);
+ String p = String.valueOf(port);
string = string.replace("%ID%", p);
- string = string.replace("%WORKER-ID%", _workerId);
- string = string.replace("%TOPOLOGY-ID%", _topologyId);
+ string = string.replace("%WORKER-ID%", workerId);
+ string = string.replace("%TOPOLOGY-ID%", topologyId);
string = string.replace("%WORKER-PORT%", p);
if (memOnheap > 0) {
string = string.replace("%HEAP-MEM%", String.valueOf(memOnheap));
}
- if (memoryLimitMB > 0) {
- string = string.replace("%LIMIT-MEM%", String.valueOf(memoryLimitMB));
+ if (memoryLimitMb > 0) {
+ string = string.replace("%LIMIT-MEM%", String.valueOf(memoryLimitMb));
}
}
return string;
@@ -507,21 +503,21 @@
*/
protected void launchWorkerProcess(List<String> command, Map<String, String> env, String logPrefix,
ExitCodeCallback processExitCallback, File targetDir) throws IOException {
- if (_resourceIsolationManager != null) {
- command = _resourceIsolationManager.getLaunchCommand(_workerId, command);
+ if (resourceIsolationManager != null) {
+ command = resourceIsolationManager.getLaunchCommand(workerId, command);
}
ClientSupervisorUtils.launchProcess(command, env, logPrefix, processExitCallback, targetDir);
}
private String getWorkerLoggingConfigFile() {
- String log4jConfigurationDir = (String) (_conf.get(DaemonConfig.STORM_LOG4J2_CONF_DIR));
+ String log4jConfigurationDir = (String) (conf.get(DaemonConfig.STORM_LOG4J2_CONF_DIR));
if (StringUtils.isNotBlank(log4jConfigurationDir)) {
if (!ServerUtils.isAbsolutePath(log4jConfigurationDir)) {
- log4jConfigurationDir = _stormHome + File.separator + log4jConfigurationDir;
+ log4jConfigurationDir = stormHome + File.separator + log4jConfigurationDir;
}
} else {
- log4jConfigurationDir = _stormHome + File.separator + "log4j2";
+ log4jConfigurationDir = stormHome + File.separator + "log4j2";
}
if (ServerUtils.IS_ON_WINDOWS && !log4jConfigurationDir.startsWith("file:")) {
@@ -540,7 +536,7 @@
*/
private List<String> getClassPathParams(final String stormRoot, final SimpleVersion topoVersion) throws IOException {
final String stormJar = ConfigUtils.supervisorStormJarPath(stormRoot);
- final List<String> dependencyLocations = getDependencyLocationsFor(_conf, _topologyId, _ops, stormRoot);
+ final List<String> dependencyLocations = getDependencyLocationsFor(conf, topologyId, ops, stormRoot);
final String workerClassPath = getWorkerClassPath(stormJar, dependencyLocations, topoVersion);
List<String> classPathParams = new ArrayList<>();
@@ -556,42 +552,43 @@
* @return a list of command line options
*/
private List<String> getCommonParams() {
- final String workersArtifacts = ConfigUtils.workerArtifactsRoot(_conf);
+ final String workersArtifacts = ConfigUtils.workerArtifactsRoot(conf);
String stormLogDir = ConfigUtils.getLogDir();
List<String> commonParams = new ArrayList<>();
- commonParams.add("-Dlogging.sensitivity=" + OR((String) _topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
+ commonParams.add("-Dlogging.sensitivity=" + OR((String) topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
commonParams.add("-Dlogfile.name=worker.log");
- commonParams.add("-Dstorm.home=" + OR(_stormHome, ""));
+ commonParams.add("-Dstorm.home=" + OR(stormHome, ""));
commonParams.add("-Dworkers.artifacts=" + workersArtifacts);
- commonParams.add("-Dstorm.id=" + _topologyId);
- commonParams.add("-Dworker.id=" + _workerId);
- commonParams.add("-Dworker.port=" + _port);
+ commonParams.add("-Dstorm.id=" + topologyId);
+ commonParams.add("-Dworker.id=" + workerId);
+ commonParams.add("-Dworker.port=" + port);
commonParams.add("-Dstorm.log.dir=" + stormLogDir);
commonParams.add("-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector");
- commonParams.add("-Dstorm.local.dir=" + _conf.get(Config.STORM_LOCAL_DIR));
- if (memoryLimitMB > 0) {
- commonParams.add("-Dworker.memory_limit_mb=" + memoryLimitMB);
+ commonParams.add("-Dstorm.local.dir=" + conf.get(Config.STORM_LOCAL_DIR));
+ if (memoryLimitMb > 0) {
+ commonParams.add("-Dworker.memory_limit_mb=" + memoryLimitMb);
}
return commonParams;
}
private int getMemOnHeap(WorkerResources resources) {
int memOnheap = 0;
- if (resources != null && resources.is_set_mem_on_heap() &&
- resources.get_mem_on_heap() > 0) {
+ if (resources != null
+ && resources.is_set_mem_on_heap()
+ && resources.get_mem_on_heap() > 0) {
memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
} else {
// set the default heap memory size for supervisor-test
- memOnheap = ObjectReader.getInt(_topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
+ memOnheap = ObjectReader.getInt(topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
}
return memOnheap;
}
private List<String> getWorkerProfilerChildOpts(int memOnheap) {
List<String> workerProfilerChildopts = new ArrayList<>();
- if (ObjectReader.getBoolean(_conf.get(DaemonConfig.WORKER_PROFILER_ENABLED), false)) {
- workerProfilerChildopts = substituteChildopts(_conf.get(DaemonConfig.WORKER_PROFILER_CHILDOPTS), memOnheap);
+ if (ObjectReader.getBoolean(conf.get(DaemonConfig.WORKER_PROFILER_ENABLED), false)) {
+ workerProfilerChildopts = substituteChildopts(conf.get(DaemonConfig.WORKER_PROFILER_CHILDOPTS), memOnheap);
}
return workerProfilerChildopts;
}
@@ -622,10 +619,10 @@
final String javaCmd = javaCmd("java");
final String stormOptions = ConfigUtils.concatIfNotNull(System.getProperty("storm.options"));
final String topoConfFile = ConfigUtils.concatIfNotNull(System.getProperty("storm.conf.file"));
- final String workerTmpDir = ConfigUtils.workerTmpRoot(_conf, _workerId);
- String topoVersionString = getStormVersionFor(_conf, _topologyId, _ops, stormRoot);
+ final String workerTmpDir = ConfigUtils.workerTmpRoot(conf, workerId);
+ String topoVersionString = getStormVersionFor(conf, topologyId, ops, stormRoot);
if (topoVersionString == null) {
- topoVersionString = (String) _conf.getOrDefault(Config.SUPERVISOR_WORKER_DEFAULT_VERSION, VersionInfo.getVersion());
+ topoVersionString = (String) conf.getOrDefault(Config.SUPERVISOR_WORKER_DEFAULT_VERSION, VersionInfo.getVersion());
}
final SimpleVersion topoVersion = new SimpleVersion(topoVersionString);
@@ -634,8 +631,8 @@
String log4jConfigurationFile = getWorkerLoggingConfigFile();
String workerLog4jConfig = log4jConfigurationFile;
- if (_topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE) != null) {
- workerLog4jConfig = workerLog4jConfig + "," + _topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE);
+ if (topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE) != null) {
+ workerLog4jConfig = workerLog4jConfig + "," + topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE);
}
List<String> commandList = new ArrayList<>();
@@ -644,7 +641,7 @@
//Log Writer Command...
commandList.add(javaCmd);
commandList.addAll(classPathParams);
- commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
+ commandList.addAll(substituteChildopts(topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
commandList.addAll(commonParams);
commandList.add("-Dlog4j.configurationFile=" + log4jConfigurationFile);
commandList.add(logWriter); //The LogWriter in turn launches the actual worker.
@@ -655,11 +652,11 @@
commandList.add("-server");
commandList.addAll(commonParams);
commandList.add("-Dlog4j.configurationFile=" + workerLog4jConfig);
- commandList.addAll(substituteChildopts(_conf.get(Config.WORKER_CHILDOPTS), memOnheap));
- commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
+ commandList.addAll(substituteChildopts(conf.get(Config.WORKER_CHILDOPTS), memOnheap));
+ commandList.addAll(substituteChildopts(topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
commandList.addAll(substituteChildopts(Utils.OR(
- _topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
- _conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
+ topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
+ conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
commandList.addAll(getWorkerProfilerChildOpts(memOnheap));
commandList.add("-Djava.library.path=" + jlp);
commandList.add("-Dstorm.conf.file=" + topoConfFile);
@@ -667,18 +664,18 @@
commandList.add("-Djava.io.tmpdir=" + workerTmpDir);
commandList.addAll(classPathParams);
commandList.add(getWorkerMain(topoVersion));
- commandList.add(_topologyId);
- commandList.add(_supervisorId);
+ commandList.add(topologyId);
+ commandList.add(supervisorId);
// supervisor port should be only presented to worker which supports RPC heartbeat
// unknown version should be treated as "current version", which supports RPC heartbeat
- if ((topoVersion.getMajor() == -1 && topoVersion.getMinor() == -1) ||
- topoVersion.compareTo(MIN_VERSION_SUPPORT_RPC_HEARTBEAT) >= 0) {
- commandList.add(String.valueOf(_supervisorPort));
+ if ((topoVersion.getMajor() == -1 && topoVersion.getMinor() == -1)
+ || topoVersion.compareTo(MIN_VERSION_SUPPORT_RPC_HEARTBEAT) >= 0) {
+ commandList.add(String.valueOf(supervisorPort));
}
- commandList.add(String.valueOf(_port));
- commandList.add(_workerId);
+ commandList.add(String.valueOf(port));
+ commandList.add(workerId);
return commandList;
}
@@ -688,7 +685,7 @@
if (super.isMemoryLimitViolated(withUpdatedLimits)) {
return true;
}
- if (_resourceIsolationManager != null) {
+ if (resourceIsolationManager != null) {
// In the short term the goal is to not shoot anyone unless we really need to.
// The on heap should limit the memory usage in most cases to a reasonable amount
// If someone is using way more than they requested this is a bug and we should
@@ -706,12 +703,12 @@
usageMb = getTotalTopologyMemoryUsed();
memoryLimitMb = getTotalTopologyMemoryReserved(withUpdatedLimits);
hardMemoryLimitOver = this.hardMemoryLimitOver * getTotalWorkersForThisTopology();
- typeOfCheck = "TOPOLOGY " + _topologyId;
+ typeOfCheck = "TOPOLOGY " + topologyId;
} else {
usageMb = getMemoryUsageMb();
- memoryLimitMb = this.memoryLimitMB;
+ memoryLimitMb = this.memoryLimitMb;
hardMemoryLimitOver = this.hardMemoryLimitOver;
- typeOfCheck = "WORKER " + _workerId;
+ typeOfCheck = "WORKER " + workerId;
}
LOG.debug(
"Enforcing memory usage for {} with usage of {} out of {} total and a hard limit of {}",
@@ -735,13 +732,13 @@
// to be use. If we cannot calculate it assume that it is bad
long systemFreeMemoryMb = 0;
try {
- systemFreeMemoryMb = _resourceIsolationManager.getSystemFreeMemoryMb();
+ systemFreeMemoryMb = resourceIsolationManager.getSystemFreeMemoryMb();
} catch (IOException e) {
LOG.warn("Error trying to calculate free memory on the system {}", e);
}
LOG.debug("SYSTEM MEMORY FREE {} MB", systemFreeMemoryMb);
//If the system is low on memory we cannot be kind and need to shoot something
- if (systemFreeMemoryMb <= lowMemoryThresholdMB) {
+ if (systemFreeMemoryMb <= lowMemoryThresholdMb) {
LOG.warn(
"{} is using {} MB > memory limit {} MB and system is low on memory {} free",
typeOfCheck,
@@ -784,8 +781,8 @@
public long getMemoryUsageMb() {
try {
long ret = 0;
- if (_resourceIsolationManager != null) {
- long usageBytes = _resourceIsolationManager.getMemoryUsage(_workerId);
+ if (resourceIsolationManager != null) {
+ long usageBytes = resourceIsolationManager.getMemoryUsage(workerId);
if (usageBytes >= 0) {
ret = usageBytes / 1024 / 1024;
}
@@ -799,18 +796,18 @@
@Override
public long getMemoryReservationMb() {
- return memoryLimitMB;
+ return memoryLimitMb;
}
private long calculateMemoryLimit(final WorkerResources resources, final int memOnHeap) {
long ret = memOnHeap;
- if (_resourceIsolationManager != null) {
+ if (resourceIsolationManager != null) {
final int memoffheap = (int) Math.ceil(resources.get_mem_off_heap());
final int extraMem =
(int)
(Math.ceil(
ObjectReader.getDouble(
- _conf.get(DaemonConfig.STORM_SUPERVISOR_MEMORY_LIMIT_TOLERANCE_MARGIN_MB),
+ conf.get(DaemonConfig.STORM_SUPERVISOR_MEMORY_LIMIT_TOLERANCE_MARGIN_MB),
0.0)));
ret += memoffheap + extraMem;
}
@@ -819,146 +816,147 @@
@Override
public void launch() throws IOException {
- _type.assertFull();
- LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", _assignment,
- _supervisorId, _port, _workerId);
- String logPrefix = "Worker Process " + _workerId;
- ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
- _exitedEarly = false;
+ type.assertFull();
+ LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", assignment,
+ supervisorId, port, workerId);
+ exitedEarly = false;
- final WorkerResources resources = _assignment.get_resources();
+ final WorkerResources resources = assignment.get_resources();
final int memOnHeap = getMemOnHeap(resources);
- memoryLimitMB = calculateMemoryLimit(resources, memOnHeap);
- final String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
- String jlp = javaLibraryPath(stormRoot, _conf);
+ memoryLimitMb = calculateMemoryLimit(resources, memOnHeap);
+ final String stormRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
+ String jlp = javaLibraryPath(stormRoot, conf);
Map<String, String> topEnvironment = new HashMap<String, String>();
@SuppressWarnings("unchecked")
- Map<String, String> environment = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
+ Map<String, String> environment = (Map<String, String>) topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (environment != null) {
topEnvironment.putAll(environment);
}
- String ld_library_path = topEnvironment.get("LD_LIBRARY_PATH");
- if (ld_library_path != null) {
- jlp = jlp + System.getProperty("path.separator") + ld_library_path;
+ String ldLibraryPath = topEnvironment.get("LD_LIBRARY_PATH");
+ if (ldLibraryPath != null) {
+ jlp = jlp + System.getProperty("path.separator") + ldLibraryPath;
}
topEnvironment.put("LD_LIBRARY_PATH", jlp);
- if (_resourceIsolationManager != null) {
+ if (resourceIsolationManager != null) {
final int cpu = (int) Math.ceil(resources.get_cpu());
//Save the memory limit so we can enforce it less strictly
- _resourceIsolationManager.reserveResourcesForWorker(_workerId, (int) memoryLimitMB, cpu);
+ resourceIsolationManager.reserveResourcesForWorker(workerId, (int) memoryLimitMb, cpu);
}
List<String> commandList = mkLaunchCommand(memOnHeap, stormRoot, jlp);
LOG.info("Launching worker with command: {}. ", ServerUtils.shellCmd(commandList));
- String workerDir = ConfigUtils.workerRoot(_conf, _workerId);
+ String workerDir = ConfigUtils.workerRoot(conf, workerId);
+ String logPrefix = "Worker Process " + workerId;
+ ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
launchWorkerProcess(commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
}
private static class TopologyMetaData {
- private final Map<String, Object> _conf;
- private final String _topologyId;
- private final AdvancedFSOps _ops;
- private final String _stormRoot;
- private boolean _dataCached = false;
- private List<String> _depLocs = null;
- private String _stormVersion = null;
+ private final Map<String, Object> conf;
+ private final String topologyId;
+ private final AdvancedFSOps ops;
+ private final String stormRoot;
+ private boolean dataCached = false;
+ private List<String> depLocs = null;
+ private String stormVersion = null;
public TopologyMetaData(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, final String stormRoot) {
- _conf = conf;
- _topologyId = topologyId;
- _ops = ops;
- _stormRoot = stormRoot;
+ this.conf = conf;
+ this.topologyId = topologyId;
+ this.ops = ops;
+ this.stormRoot = stormRoot;
}
+ @Override
public String toString() {
List<String> data;
String stormVersion;
synchronized (this) {
- data = _depLocs;
- stormVersion = _stormVersion;
+ data = depLocs;
+ stormVersion = this.stormVersion;
}
- return "META for " + _topologyId + " DEP_LOCS => " + data + " STORM_VERSION => " + stormVersion;
+ return "META for " + topologyId + " DEP_LOCS => " + data + " STORM_VERSION => " + stormVersion;
}
private synchronized void readData() throws IOException {
- final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(_conf, _topologyId, _ops);
+ final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(conf, topologyId, ops);
final List<String> dependencyLocations = new ArrayList<>();
if (stormTopology.get_dependency_jars() != null) {
for (String dependency : stormTopology.get_dependency_jars()) {
- dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
+ dependencyLocations.add(new File(stormRoot, dependency).getAbsolutePath());
}
}
if (stormTopology.get_dependency_artifacts() != null) {
for (String dependency : stormTopology.get_dependency_artifacts()) {
- dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
+ dependencyLocations.add(new File(stormRoot, dependency).getAbsolutePath());
}
}
- _depLocs = dependencyLocations;
- _stormVersion = stormTopology.get_storm_version();
- _dataCached = true;
+ depLocs = dependencyLocations;
+ stormVersion = stormTopology.get_storm_version();
+ dataCached = true;
}
public synchronized List<String> getDepLocs() throws IOException {
- if (!_dataCached) {
+ if (!dataCached) {
readData();
}
- return _depLocs;
+ return depLocs;
}
public synchronized String getStormVersion() throws IOException {
- if (!_dataCached) {
+ if (!dataCached) {
readData();
}
- return _stormVersion;
+ return stormVersion;
}
}
- static class TopoMetaLRUCache {
- public final int _maxSize = 100; //We could make this configurable in the future...
+ static class TopoMetaLruCache {
+ public final int maxSize = 100; //We could make this configurable in the future...
@SuppressWarnings("serial")
- private LinkedHashMap<String, TopologyMetaData> _cache = new LinkedHashMap<String, TopologyMetaData>() {
+ private LinkedHashMap<String, TopologyMetaData> cache = new LinkedHashMap<String, TopologyMetaData>() {
@Override
protected boolean removeEldestEntry(Map.Entry<String, TopologyMetaData> eldest) {
- return (size() > _maxSize);
+ return (size() > maxSize);
}
};
public synchronized TopologyMetaData get(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops,
String stormRoot) {
//Only go off of the topology id for now.
- TopologyMetaData dl = _cache.get(topologyId);
+ TopologyMetaData dl = cache.get(topologyId);
if (dl == null) {
- _cache.putIfAbsent(topologyId, new TopologyMetaData(conf, topologyId, ops, stormRoot));
- dl = _cache.get(topologyId);
+ cache.putIfAbsent(topologyId, new TopologyMetaData(conf, topologyId, ops, stormRoot));
+ dl = cache.get(topologyId);
}
return dl;
}
public synchronized void clear() {
- _cache.clear();
+ cache.clear();
}
}
private class ProcessExitCallback implements ExitCodeCallback {
- private final String _logPrefix;
+ private final String logPrefix;
public ProcessExitCallback(String logPrefix) {
- _logPrefix = logPrefix;
+ this.logPrefix = logPrefix;
}
@Override
public void call(int exitCode) {
- LOG.info("{} exited with code: {}", _logPrefix, exitCode);
- _exitedEarly = true;
+ LOG.info("{} exited with code: {}", logPrefix, exitCode);
+ exitedEarly = true;
}
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
index 9c000cb..51dceb4 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
@@ -21,31 +21,31 @@
import org.apache.storm.utils.LocalState;
/**
- * Launch containers with no security using standard java commands
+ * Launch containers with no security using standard java commands.
*/
public class BasicContainerLauncher extends ContainerLauncher {
- protected final ResourceIsolationInterface _resourceIsolationManager;
- private final Map<String, Object> _conf;
- private final String _supervisorId;
- private final int _supervisorPort;
+ protected final ResourceIsolationInterface resourceIsolationManager;
+ private final Map<String, Object> conf;
+ private final String supervisorId;
+ private final int supervisorPort;
private final StormMetricsRegistry metricsRegistry;
private final ContainerMemoryTracker containerMemoryTracker;
public BasicContainerLauncher(Map<String, Object> conf, String supervisorId, int supervisorPort,
ResourceIsolationInterface resourceIsolationManager, StormMetricsRegistry metricsRegistry,
ContainerMemoryTracker containerMemoryTracker) throws IOException {
- _conf = conf;
- _supervisorId = supervisorId;
- _supervisorPort = supervisorPort;
- _resourceIsolationManager = resourceIsolationManager;
+ this.conf = conf;
+ this.supervisorId = supervisorId;
+ this.supervisorPort = supervisorPort;
+ this.resourceIsolationManager = resourceIsolationManager;
this.metricsRegistry = metricsRegistry;
this.containerMemoryTracker = containerMemoryTracker;
}
@Override
public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
- Container container = new BasicContainer(ContainerType.LAUNCH, _conf, _supervisorId, _supervisorPort, port,
- assignment, _resourceIsolationManager, state, null, metricsRegistry,
+ Container container = new BasicContainer(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port,
+ assignment, resourceIsolationManager, state, null, metricsRegistry,
containerMemoryTracker);
container.setup();
container.launch();
@@ -54,13 +54,13 @@
@Override
public Container recoverContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
- return new BasicContainer(ContainerType.RECOVER_FULL, _conf, _supervisorId, _supervisorPort, port, assignment,
- _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker);
+ return new BasicContainer(ContainerType.RECOVER_FULL, conf, supervisorId, supervisorPort, port, assignment,
+ resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker);
}
@Override
public Killable recoverContainer(String workerId, LocalState localState) throws IOException {
- return new BasicContainer(ContainerType.RECOVER_PARTIAL, _conf, _supervisorId, _supervisorPort, -1, null,
- _resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker);
+ return new BasicContainer(ContainerType.RECOVER_PARTIAL, conf, supervisorId, supervisorPort, -1, null,
+ resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker);
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
index 8b58483..a492f16 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
@@ -74,18 +74,18 @@
private final Timer shutdownDuration;
private final Timer cleanupDuration;
- protected final Map<String, Object> _conf;
- protected final Map<String, Object> _topoConf; //Not set if RECOVER_PARTIAL
- protected final String _topologyId; //Not set if RECOVER_PARTIAL
- protected final String _supervisorId;
- protected final int _supervisorPort;
- protected final int _port; //Not set if RECOVER_PARTIAL
- protected final LocalAssignment _assignment; //Not set if RECOVER_PARTIAL
- protected final AdvancedFSOps _ops;
- protected final ResourceIsolationInterface _resourceIsolationManager;
- protected final boolean _symlinksDisabled;
- protected String _workerId;
- protected ContainerType _type;
+ protected final Map<String, Object> conf;
+ protected final Map<String, Object> topoConf; //Not set if RECOVER_PARTIAL
+ protected final String topologyId; //Not set if RECOVER_PARTIAL
+ protected final String supervisorId;
+ protected final int supervisorPort;
+ protected final int port; //Not set if RECOVER_PARTIAL
+ protected final LocalAssignment assignment; //Not set if RECOVER_PARTIAL
+ protected final AdvancedFSOps ops;
+ protected final ResourceIsolationInterface resourceIsolationManager;
+ protected final boolean symlinksDisabled;
+ protected String workerId;
+ protected ContainerType type;
protected ContainerMemoryTracker containerMemoryTracker;
private long lastMetricProcessTime = 0L;
private Timer.Context shutdownTimer = null;
@@ -98,7 +98,7 @@
* @param supervisorId the ID of the supervisor this is a part of.
* @param supervisorPort the thrift server port of the supervisor this is a part of.
* @param port the port the container is on. Should be <= 0 if only a partial recovery @param assignment
- * the assignment for this container. Should be null if only a partial recovery.
+ * the assignment for this container. Should be null if only a partial recovery.
* @param resourceIsolationManager used to isolate resources for a container can be null if no isolation is used.
* @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
* @param topoConf the config of the topology (mostly for testing) if null and not a partial recovery the real conf is read.
@@ -115,44 +115,44 @@
assert (conf != null);
assert (supervisorId != null);
- _symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
+ symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
if (ops == null) {
ops = AdvancedFSOps.make(conf);
}
- _workerId = workerId;
- _type = type;
- _port = port;
- _ops = ops;
- _conf = conf;
- _supervisorId = supervisorId;
- _supervisorPort = supervisorPort;
- _resourceIsolationManager = resourceIsolationManager;
- _assignment = assignment;
+ this.workerId = workerId;
+ this.type = type;
+ this.port = port;
+ this.ops = ops;
+ this.conf = conf;
+ this.supervisorId = supervisorId;
+ this.supervisorPort = supervisorPort;
+ this.resourceIsolationManager = resourceIsolationManager;
+ this.assignment = assignment;
- if (_type.isOnlyKillable()) {
- assert (_assignment == null);
- assert (_port <= 0);
- assert (_workerId != null);
- _topologyId = null;
- _topoConf = null;
+ if (this.type.isOnlyKillable()) {
+ assert (this.assignment == null);
+ assert (this.port <= 0);
+ assert (this.workerId != null);
+ topologyId = null;
+ this.topoConf = null;
} else {
assert (assignment != null);
assert (port > 0);
- _topologyId = assignment.get_topology_id();
- if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
+ topologyId = assignment.get_topology_id();
+ if (!this.ops.doRequiredTopoFilesExist(this.conf, topologyId)) {
LOG.info(
"Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
- _assignment,
- _supervisorId, _port, _workerId);
+ this.assignment,
+ this.supervisorId, this.port, this.workerId);
throw new ContainerRecoveryException("Missing required topology files...");
}
if (topoConf == null) {
- _topoConf = readTopoConf();
+ this.topoConf = readTopoConf();
} else {
//For testing...
- _topoConf = topoConf;
+ this.topoConf = topoConf;
}
}
this.numCleanupExceptions = metricsRegistry.registerMeter("supervisor:num-cleanup-exceptions");
@@ -166,37 +166,26 @@
@Override
public String toString() {
- return "topo:" + _topologyId + " worker:" + _workerId;
+ return "topo:" + topologyId + " worker:" + workerId;
}
protected Map<String, Object> readTopoConf() throws IOException {
- assert (_topologyId != null);
- return ConfigUtils.readSupervisorStormConf(_conf, _topologyId);
+ assert (topologyId != null);
+ return ConfigUtils.readSupervisorStormConf(conf, topologyId);
}
/**
* Kill a given process.
*
* @param pid the id of the process to kill
- * @throws IOException
*/
protected void kill(long pid) throws IOException {
ServerUtils.killProcessWithSigTerm(String.valueOf(pid));
}
- /**
- * Kill a given process.
- *
- * @param pid the id of the process to kill
- * @throws IOException
- */
- protected void forceKill(long pid) throws IOException {
- ServerUtils.forceKillProcess(String.valueOf(pid));
- }
-
@Override
public void kill() throws IOException {
- LOG.info("Killing {}:{}", _supervisorId, _workerId);
+ LOG.info("Killing {}:{}", supervisorId, workerId);
if (shutdownTimer == null) {
shutdownTimer = shutdownDuration.time();
}
@@ -212,9 +201,18 @@
}
}
+ /**
+ * Kill a given process.
+ *
+ * @param pid the id of the process to kill
+ */
+ protected void forceKill(long pid) throws IOException {
+ ServerUtils.forceKillProcess(String.valueOf(pid));
+ }
+
@Override
public void forceKill() throws IOException {
- LOG.info("Force Killing {}:{}", _supervisorId, _workerId);
+ LOG.info("Force Killing {}:{}", supervisorId, workerId);
numForceKill.mark();
try {
Set<Long> pids = getAllPids();
@@ -236,9 +234,9 @@
* @throws IOException on any error
*/
public LSWorkerHeartbeat readHeartbeat() throws IOException {
- LocalState localState = ConfigUtils.workerState(_conf, _workerId);
+ LocalState localState = ConfigUtils.workerState(conf, workerId);
LSWorkerHeartbeat hb = localState.getWorkerHeartBeat();
- LOG.trace("{}: Reading heartbeat {}", _workerId, hb);
+ LOG.trace("{}: Reading heartbeat {}", workerId, hb);
return hb;
}
@@ -319,7 +317,7 @@
for (Long pid : pids) {
LOG.debug("Checking if pid {} owner {} is alive", pid, user);
if (!isProcessAlive(pid, user)) {
- LOG.debug("{}: PID {} is dead", _workerId, pid);
+ LOG.debug("{}: PID {} is dead", workerId, pid);
} else {
allDead = false;
break;
@@ -337,7 +335,7 @@
@Override
public void cleanUp() throws IOException {
try (Timer.Context t = cleanupDuration.time()) {
- containerMemoryTracker.remove(_port);
+ containerMemoryTracker.remove(port);
cleanUpForRestart();
} catch (IOException e) {
//This may or may not be reported depending on when process exits
@@ -353,23 +351,23 @@
* @throws IOException on any error
*/
protected void setup() throws IOException {
- _type.assertFull();
- if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
+ type.assertFull();
+ if (!ops.doRequiredTopoFilesExist(conf, topologyId)) {
LOG.info("Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
- _assignment,
- _supervisorId, _port, _workerId);
+ assignment,
+ supervisorId, port, workerId);
throw new IllegalStateException("Not all needed files are here!!!!");
}
- LOG.info("Setting up {}:{}", _supervisorId, _workerId);
+ LOG.info("Setting up {}:{}", supervisorId, workerId);
- _ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
- _ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
- _ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));
+ ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(conf, workerId)));
+ ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(conf, workerId)));
+ ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(conf, workerId)));
- File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
- if (!_ops.fileExists(workerArtifacts)) {
- _ops.forceMkdir(workerArtifacts);
- _ops.setupWorkerArtifactsDir(_assignment.get_owner(), workerArtifacts);
+ File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(conf, topologyId, port));
+ if (!ops.fileExists(workerArtifacts)) {
+ ops.forceMkdir(workerArtifacts);
+ ops.setupWorkerArtifactsDir(assignment.get_owner(), workerArtifacts);
}
String user = getWorkerUser();
@@ -387,43 +385,43 @@
*/
@SuppressWarnings("unchecked")
protected void writeLogMetadata(String user) throws IOException {
- _type.assertFull();
+ type.assertFull();
Map<String, Object> data = new HashMap<>();
data.put(Config.TOPOLOGY_SUBMITTER_USER, user);
- data.put("worker-id", _workerId);
+ data.put("worker-id", workerId);
Set<String> logsGroups = new HashSet<>();
- if (_topoConf.get(DaemonConfig.LOGS_GROUPS) != null) {
- List<String> groups = (List<String>) _topoConf.get(DaemonConfig.LOGS_GROUPS);
+ if (topoConf.get(DaemonConfig.LOGS_GROUPS) != null) {
+ List<String> groups = (List<String>) topoConf.get(DaemonConfig.LOGS_GROUPS);
for (String group : groups) {
logsGroups.add(group);
}
}
- if (_topoConf.get(Config.TOPOLOGY_GROUPS) != null) {
- List<String> topGroups = (List<String>) _topoConf.get(Config.TOPOLOGY_GROUPS);
+ if (topoConf.get(Config.TOPOLOGY_GROUPS) != null) {
+ List<String> topGroups = (List<String>) topoConf.get(Config.TOPOLOGY_GROUPS);
logsGroups.addAll(topGroups);
}
data.put(DaemonConfig.LOGS_GROUPS, logsGroups.toArray());
Set<String> logsUsers = new HashSet<>();
- if (_topoConf.get(DaemonConfig.LOGS_USERS) != null) {
- List<String> logUsers = (List<String>) _topoConf.get(DaemonConfig.LOGS_USERS);
+ if (topoConf.get(DaemonConfig.LOGS_USERS) != null) {
+ List<String> logUsers = (List<String>) topoConf.get(DaemonConfig.LOGS_USERS);
for (String logUser : logUsers) {
logsUsers.add(logUser);
}
}
- if (_topoConf.get(Config.TOPOLOGY_USERS) != null) {
- List<String> topUsers = (List<String>) _topoConf.get(Config.TOPOLOGY_USERS);
+ if (topoConf.get(Config.TOPOLOGY_USERS) != null) {
+ List<String> topUsers = (List<String>) topoConf.get(Config.TOPOLOGY_USERS);
for (String logUser : topUsers) {
logsUsers.add(logUser);
}
}
data.put(DaemonConfig.LOGS_USERS, logsUsers.toArray());
- File file = ServerConfigUtils.getLogMetaDataFile(_conf, _topologyId, _port);
+ File file = ServerConfigUtils.getLogMetaDataFile(conf, topologyId, port);
Yaml yaml = new Yaml();
- try (Writer writer = _ops.getWriter(file)) {
+ try (Writer writer = ops.getWriter(file)) {
yaml.dump(data, writer);
}
}
@@ -434,13 +432,13 @@
* @throws IOException on any error
*/
protected void createArtifactsLink() throws IOException {
- _type.assertFull();
- if (!_symlinksDisabled) {
- File workerDir = new File(ConfigUtils.workerRoot(_conf, _workerId));
- File topoDir = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
- if (_ops.fileExists(workerDir)) {
- LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory", _workerId, _topologyId);
- _ops.createSymlink(new File(workerDir, "artifacts"), topoDir);
+ type.assertFull();
+ if (!symlinksDisabled) {
+ File workerDir = new File(ConfigUtils.workerRoot(conf, workerId));
+ File topoDir = new File(ConfigUtils.workerArtifactsRoot(conf, topologyId, port));
+ if (ops.fileExists(workerDir)) {
+ LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory", workerId, topologyId);
+ ops.createSymlink(new File(workerDir, "artifacts"), topoDir);
}
}
}
@@ -451,12 +449,12 @@
* @throws IOException on any error.
*/
protected void createBlobstoreLinks() throws IOException {
- _type.assertFull();
- String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
- String workerRoot = ConfigUtils.workerRoot(_conf, _workerId);
+ type.assertFull();
+ String stormRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
+ String workerRoot = ConfigUtils.workerRoot(conf, workerId);
@SuppressWarnings("unchecked")
- Map<String, Map<String, Object>> blobstoreMap = (Map<String, Map<String, Object>>) _topoConf.get(Config.TOPOLOGY_BLOBSTORE_MAP);
+ Map<String, Map<String, Object>> blobstoreMap = (Map<String, Map<String, Object>>) topoConf.get(Config.TOPOLOGY_BLOBSTORE_MAP);
List<String> blobFileNames = new ArrayList<>();
if (blobstoreMap != null) {
for (Map.Entry<String, Map<String, Object>> entry : blobstoreMap.entrySet()) {
@@ -478,17 +476,17 @@
}
resourceFileNames.addAll(blobFileNames);
- if (!_symlinksDisabled) {
- LOG.info("Creating symlinks for worker-id: {} storm-id: {} for files({}): {}", _workerId, _topologyId, resourceFileNames.size(),
+ if (!symlinksDisabled) {
+ LOG.info("Creating symlinks for worker-id: {} storm-id: {} for files({}): {}", workerId, topologyId, resourceFileNames.size(),
resourceFileNames);
if (targetResourcesDir.exists()) {
- _ops.createSymlink(new File(workerRoot, ServerConfigUtils.RESOURCES_SUBDIR), targetResourcesDir);
+ ops.createSymlink(new File(workerRoot, ServerConfigUtils.RESOURCES_SUBDIR), targetResourcesDir);
} else {
- LOG.info("Topology jar for worker-id: {} storm-id: {} does not contain re sources directory {}.", _workerId, _topologyId,
+ LOG.info("Topology jar for worker-id: {} storm-id: {} does not contain re sources directory {}.", workerId, topologyId,
targetResourcesDir.toString());
}
for (String fileName : blobFileNames) {
- _ops.createSymlink(new File(workerRoot, fileName),
+ ops.createSymlink(new File(workerRoot, fileName),
new File(stormRoot, fileName));
}
} else if (blobFileNames.size() > 0) {
@@ -497,16 +495,17 @@
}
/**
+ * Get all PIDs.
* @return all of the pids that are a part of this container.
*/
protected Set<Long> getAllPids() throws IOException {
Set<Long> ret = new HashSet<>();
- for (String listing : ConfigUtils.readDirContents(ConfigUtils.workerPidsRoot(_conf, _workerId))) {
+ for (String listing : ConfigUtils.readDirContents(ConfigUtils.workerPidsRoot(conf, workerId))) {
ret.add(Long.valueOf(listing));
}
- if (_resourceIsolationManager != null) {
- Set<Long> morePids = _resourceIsolationManager.getRunningPids(_workerId);
+ if (resourceIsolationManager != null) {
+ Set<Long> morePids = resourceIsolationManager.getRunningPids(workerId);
assert (morePids != null);
ret.addAll(morePids);
}
@@ -515,34 +514,35 @@
}
/**
+ * Get worker user.
* @return the user that some operations should be done as.
*
* @throws IOException on any error
*/
protected String getWorkerUser() throws IOException {
- LOG.info("GET worker-user for {}", _workerId);
- File file = new File(ConfigUtils.workerUserFile(_conf, _workerId));
+ LOG.info("GET worker-user for {}", workerId);
+ File file = new File(ConfigUtils.workerUserFile(conf, workerId));
- if (_ops.fileExists(file)) {
- return _ops.slurpString(file).trim();
- } else if (_assignment != null && _assignment.is_set_owner()) {
- return _assignment.get_owner();
+ if (ops.fileExists(file)) {
+ return ops.slurpString(file).trim();
+ } else if (assignment != null && assignment.is_set_owner()) {
+ return assignment.get_owner();
}
- if (ConfigUtils.isLocalMode(_conf)) {
+ if (ConfigUtils.isLocalMode(conf)) {
return System.getProperty("user.name");
} else {
- File f = new File(ConfigUtils.workerArtifactsRoot(_conf));
+ File f = new File(ConfigUtils.workerArtifactsRoot(conf));
if (f.exists()) {
return Files.getOwner(f.toPath()).getName();
}
- throw new IllegalStateException("Could not recover the user for " + _workerId);
+ throw new IllegalStateException("Could not recover the user for " + workerId);
}
}
/**
* Returns the user that the worker process is running as.
*
- * The default behavior is to launch the worker as the user supervisor is running as (e.g. 'storm')
+ * <p>The default behavior is to launch the worker as the user supervisor is running as (e.g. 'storm')
*
* @return the user that the worker process is running as.
*/
@@ -551,14 +551,14 @@
}
protected void saveWorkerUser(String user) throws IOException {
- _type.assertFull();
- LOG.info("SET worker-user {} {}", _workerId, user);
- _ops.dump(new File(ConfigUtils.workerUserFile(_conf, _workerId)), user);
+ type.assertFull();
+ LOG.info("SET worker-user {} {}", workerId, user);
+ ops.dump(new File(ConfigUtils.workerUserFile(conf, workerId)), user);
}
protected void deleteSavedWorkerUser() throws IOException {
- LOG.info("REMOVE worker-user {}", _workerId);
- _ops.deleteIfExists(new File(ConfigUtils.workerUserFile(_conf, _workerId)));
+ LOG.info("REMOVE worker-user {}", workerId);
+ ops.deleteIfExists(new File(ConfigUtils.workerUserFile(conf, workerId)));
}
/**
@@ -568,28 +568,28 @@
* @throws IOException on any error
*/
public void cleanUpForRestart() throws IOException {
- LOG.info("Cleaning up {}:{}", _supervisorId, _workerId);
+ LOG.info("Cleaning up {}:{}", supervisorId, workerId);
Set<Long> pids = getAllPids();
String user = getWorkerUser();
for (Long pid : pids) {
- File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid));
- _ops.deleteIfExists(path, user, _workerId);
+ File path = new File(ConfigUtils.workerPidPath(conf, workerId, pid));
+ ops.deleteIfExists(path, user, workerId);
}
//clean up for resource isolation if enabled
- if (_resourceIsolationManager != null) {
- _resourceIsolationManager.releaseResourcesForWorker(_workerId);
+ if (resourceIsolationManager != null) {
+ resourceIsolationManager.releaseResourcesForWorker(workerId);
}
//Always make sure to clean up everything else before worker directory
//is removed since that is what is going to trigger the retry for cleanup
- _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId);
- _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId);
- _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId);
- _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId);
+ ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(conf, workerId)), user, workerId);
+ ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(conf, workerId)), user, workerId);
+ ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(conf, workerId)), user, workerId);
+ ops.deleteIfExists(new File(ConfigUtils.workerRoot(conf, workerId)), user, workerId);
deleteSavedWorkerUser();
- _workerId = null;
+ workerId = null;
}
/**
@@ -604,11 +604,11 @@
}
protected void updateMemoryAccounting() {
- _type.assertFull();
+ type.assertFull();
long used = getMemoryUsageMb();
long reserved = getMemoryReservationMb();
- containerMemoryTracker.setUsedMemoryMb(_port, _topologyId, used);
- containerMemoryTracker.setReservedMemoryMb(_port, _topologyId, reserved);
+ containerMemoryTracker.setUsedMemoryMb(port, topologyId, used);
+ containerMemoryTracker.setReservedMemoryMb(port, topologyId, reserved);
}
/**
@@ -616,7 +616,7 @@
*/
public long getTotalTopologyMemoryUsed() {
updateMemoryAccounting();
- return containerMemoryTracker.getUsedMemoryMb(_topologyId);
+ return containerMemoryTracker.getUsedMemoryMb(topologyId);
}
/**
@@ -628,7 +628,7 @@
public long getTotalTopologyMemoryReserved(LocalAssignment withUpdatedLimits) {
updateMemoryAccounting();
long ret =
- containerMemoryTracker.getReservedMemoryMb(_topologyId);
+ containerMemoryTracker.getReservedMemoryMb(topologyId);
if (withUpdatedLimits.is_set_total_node_shared()) {
ret += withUpdatedLimits.get_total_node_shared();
}
@@ -639,7 +639,7 @@
* Get the number of workers for this topology.
*/
public long getTotalWorkersForThisTopology() {
- return containerMemoryTracker.getAssignedWorkerCount(_topologyId);
+ return containerMemoryTracker.getAssignedWorkerCount(topologyId);
}
/**
@@ -691,7 +691,7 @@
* Get the id of the container or null if there is no worker id right now.
*/
public String getWorkerId() {
- return _workerId;
+ return workerId;
}
/**
@@ -699,7 +699,7 @@
*/
void processMetrics(OnlyLatestExecutor<Integer> exec, WorkerMetricsProcessor processor) {
try {
- Optional<Long> usedMemoryForPort = containerMemoryTracker.getUsedMemoryMb(_port);
+ Optional<Long> usedMemoryForPort = containerMemoryTracker.getUsedMemoryMb(port);
if (usedMemoryForPort.isPresent()) {
// Make sure we don't process too frequently.
long nextMetricProcessTime = this.lastMetricProcessTime + 60L * 1000L;
@@ -712,16 +712,19 @@
// create metric for memory
long timestamp = System.currentTimeMillis();
- WorkerMetricPoint workerMetric = new WorkerMetricPoint(MEMORY_USED_METRIC, timestamp, usedMemoryForPort.get(), SYSTEM_COMPONENT_ID,
- INVALID_EXECUTOR_ID, INVALID_STREAM_ID);
+ WorkerMetricPoint workerMetric = new WorkerMetricPoint(MEMORY_USED_METRIC,
+ timestamp,
+ usedMemoryForPort.get(),
+ SYSTEM_COMPONENT_ID,
+ INVALID_EXECUTOR_ID, INVALID_STREAM_ID);
WorkerMetricList metricList = new WorkerMetricList();
metricList.add_to_metrics(workerMetric);
- WorkerMetrics metrics = new WorkerMetrics(_topologyId, _port, hostname, metricList);
+ WorkerMetrics metrics = new WorkerMetrics(topologyId, port, hostname, metricList);
- exec.execute(_port, () -> {
+ exec.execute(port, () -> {
try {
- processor.processWorkerMetrics(_conf, metrics);
+ processor.processWorkerMetrics(conf, metrics);
} catch (MetricException e) {
LOG.error("Failed to process metrics", e);
}
@@ -739,26 +742,26 @@
RECOVER_FULL(true, false),
RECOVER_PARTIAL(true, true);
- private final boolean _recovery;
- private final boolean _onlyKillable;
+ private final boolean recovery;
+ private final boolean onlyKillable;
ContainerType(boolean recovery, boolean onlyKillable) {
- _recovery = recovery;
- _onlyKillable = onlyKillable;
+ this.recovery = recovery;
+ this.onlyKillable = onlyKillable;
}
public boolean isRecovery() {
- return _recovery;
+ return recovery;
}
public void assertFull() {
- if (_onlyKillable) {
+ if (onlyKillable) {
throw new IllegalStateException("Container is only Killable.");
}
}
public boolean isOnlyKillable() {
- return _onlyKillable;
+ return onlyKillable;
}
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
index b310018..d5ae161 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
@@ -28,7 +28,7 @@
import org.slf4j.LoggerFactory;
/**
- * Launches containers
+ * Launches containers.
*/
public abstract class ContainerLauncher {
private static final Logger LOG = LoggerFactory.getLogger(ContainerLauncher.class);
@@ -76,7 +76,7 @@
}
/**
- * Launch a container in a given slot
+ * Launch a container in a given slot.
* @param port the port to run this on
* @param assignment what to launch
* @param state the current state of the supervisor
@@ -86,7 +86,7 @@
public abstract Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException;
/**
- * Recover a container for a running process
+ * Recover a container for a running process.
* @param port the port the assignment is running on
* @param assignment the assignment that was launched
* @param state the current state of the supervisor
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
index b5fc1f8..6d0924e 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.storm.daemon.supervisor;
import java.util.Optional;
@@ -21,17 +22,14 @@
public class ContainerMemoryTracker {
- private final ConcurrentHashMap<Integer, TopoAndMemory> usedMemory =
- new ConcurrentHashMap<>();
- private final ConcurrentHashMap<Integer, TopoAndMemory> reservedMemory =
- new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<Integer, TopoAndMemory> usedMemory = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<Integer, TopoAndMemory> reservedMemory = new ConcurrentHashMap<>();
public ContainerMemoryTracker(StormMetricsRegistry metricsRegistry) {
metricsRegistry.registerGauge(
"supervisor:current-used-memory-mb",
() -> {
- Long val =
- usedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
+ Long val = usedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
int ret = val.intValue();
if (val > Integer.MAX_VALUE) { // Would only happen at 2 PB so we are OK for now
ret = Integer.MAX_VALUE;
@@ -41,8 +39,7 @@
metricsRegistry.registerGauge(
"supervisor:current-reserved-memory-mb",
() -> {
- Long val =
- reservedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
+ Long val = reservedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
int ret = val.intValue();
if (val > Integer.MAX_VALUE) { // Would only happen at 2 PB so we are OK for now
ret = Integer.MAX_VALUE;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
index 420f277..78fc2de 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
@@ -21,25 +21,26 @@
* kill -15 equivalent
* @throws IOException on any error
*/
- public void kill() throws IOException;
+ void kill() throws IOException;
/**
* Kill the processes in this container violently.
* kill -9 equivalent
* @throws IOException on any error
*/
- public void forceKill() throws IOException;
+ void forceKill() throws IOException;
/**
+ * Check whether all processes are dead.
* @return true if all of the processes are dead, else false
* @throws IOException on any error
*/
- public boolean areAllProcessesDead() throws IOException;
+ boolean areAllProcessesDead() throws IOException;
/**
* Clean up the container. It is not coming back.
* by default do the same thing as when restarting.
* @throws IOException on any error
*/
- public void cleanUp() throws IOException;
+ void cleanUp() throws IOException;
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
index 228da84..b74bbf9 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
@@ -26,9 +26,9 @@
public class LocalContainer extends Container {
private static final Logger LOG = LoggerFactory.getLogger(LocalContainer.class);
- private final IContext _sharedContext;
+ private final IContext sharedContext;
private final org.apache.storm.generated.Supervisor.Iface localSupervisor;
- private volatile boolean _isAlive = false;
+ private volatile boolean isAlive = false;
public LocalContainer(Map<String, Object> conf, String supervisorId, int supervisorPort, int port,
LocalAssignment assignment, IContext sharedContext, StormMetricsRegistry metricsRegistry,
@@ -36,8 +36,8 @@
org.apache.storm.generated.Supervisor.Iface localSupervisor) throws IOException {
super(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port, assignment, null, null, null, null, metricsRegistry,
containerMemoryTracker);
- _sharedContext = sharedContext;
- _workerId = Utils.uuid();
+ this.sharedContext = sharedContext;
+ workerId = Utils.uuid();
this.localSupervisor = localSupervisor;
}
@@ -53,7 +53,7 @@
@Override
public void launch() throws IOException {
- Worker worker = new Worker(_conf, _sharedContext, _topologyId, _supervisorId, _supervisorPort, _port, _workerId,
+ Worker worker = new Worker(conf, sharedContext, topologyId, supervisorId, supervisorPort, port, workerId,
() -> {
return () -> localSupervisor;
});
@@ -63,21 +63,21 @@
throw new IOException(e);
}
saveWorkerUser(System.getProperty("user.name"));
- ProcessSimulator.registerProcess(_workerId, worker);
- _isAlive = true;
+ ProcessSimulator.registerProcess(workerId, worker);
+ isAlive = true;
}
@Override
public void kill() throws IOException {
- ProcessSimulator.killProcess(_workerId);
- _isAlive = false;
+ ProcessSimulator.killProcess(workerId);
+ isAlive = false;
//Make sure the worker is down before we try to shoot any child processes
super.kill();
}
@Override
public boolean areAllProcessesDead() throws IOException {
- return !_isAlive && super.areAllProcessesDead();
+ return !isAlive && super.areAllProcessesDead();
}
@Override
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
index d9f3f8d..5434c77 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
@@ -23,10 +23,10 @@
* Launch Containers in local mode.
*/
public class LocalContainerLauncher extends ContainerLauncher {
- private final Map<String, Object> _conf;
- private final String _supervisorId;
- private final int _supervisorPort;
- private final IContext _sharedContext;
+ private final Map<String, Object> conf;
+ private final String supervisorId;
+ private final int supervisorPort;
+ private final IContext sharedContext;
private final StormMetricsRegistry metricsRegistry;
private final ContainerMemoryTracker containerMemoryTracker;
private final org.apache.storm.generated.Supervisor.Iface localSupervisor;
@@ -35,10 +35,10 @@
IContext sharedContext, StormMetricsRegistry metricsRegistry,
ContainerMemoryTracker containerMemoryTracker,
org.apache.storm.generated.Supervisor.Iface localSupervisor) {
- _conf = conf;
- _supervisorId = supervisorId;
- _supervisorPort = supervisorPort;
- _sharedContext = sharedContext;
+ this.conf = conf;
+ this.supervisorId = supervisorId;
+ this.supervisorPort = supervisorPort;
+ this.sharedContext = sharedContext;
this.metricsRegistry = metricsRegistry;
this.containerMemoryTracker = containerMemoryTracker;
this.localSupervisor = localSupervisor;
@@ -46,8 +46,8 @@
@Override
public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
- LocalContainer ret = new LocalContainer(_conf, _supervisorId, _supervisorPort,
- port, assignment, _sharedContext, metricsRegistry, containerMemoryTracker, localSupervisor);
+ LocalContainer ret = new LocalContainer(conf, supervisorId, supervisorPort,
+ port, assignment, sharedContext, metricsRegistry, containerMemoryTracker, localSupervisor);
ret.setup();
ret.launch();
return ret;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
index a0d35d9..81f3818 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
@@ -39,7 +39,7 @@
}
/**
- * Run something in the future, but replace it with the latest if it is taking too long
+ * Run something in the future, but replace it with the latest if it is taking too long.
*
* @param key what to use to dedupe things.
* @param r what you want to run.
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
index 6b18bbe..7337927 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
@@ -62,7 +62,7 @@
private final AtomicInteger readRetry = new AtomicInteger(0);
private final String assignmentId;
private final int supervisorPort;
- private final ISupervisor iSuper;
+ private final ISupervisor supervisor;
private final AsyncLocalizer localizer;
private final ContainerLauncher launcher;
private final String host;
@@ -77,7 +77,7 @@
this.stormClusterState = supervisor.getStormClusterState();
this.assignmentId = supervisor.getAssignmentId();
this.supervisorPort = supervisor.getThriftServerPort();
- this.iSuper = supervisor.getiSupervisor();
+ this.supervisor = supervisor.getiSupervisor();
this.localizer = supervisor.getAsyncLocalizer();
this.host = supervisor.getHostName();
this.localState = supervisor.getLocalState();
@@ -126,7 +126,7 @@
private Slot mkSlot(int port) throws Exception {
return new Slot(localizer, superConf, launcher, host, port,
- localState, stormClusterState, iSuper, cachedAssignments, metricsExec, metricsProcessor, slotMetrics);
+ localState, stormClusterState, supervisor, cachedAssignments, metricsExec, metricsProcessor, slotMetrics);
}
@Override
@@ -147,12 +147,12 @@
LOG.debug("All assignment: {}", allAssignments);
LOG.debug("Topology Ids -> Profiler Actions {}", topoIdToProfilerActions);
for (Integer port : allAssignments.keySet()) {
- if (iSuper.confirmAssigned(port)) {
+ if (supervisor.confirmAssigned(port)) {
assignedPorts.add(port);
}
}
HashSet<Integer> allPorts = new HashSet<>(assignedPorts);
- iSuper.assigned(allPorts);
+ supervisor.assigned(allPorts);
allPorts.addAll(slots.keySet());
Map<Integer, Set<TopoProfileAction>> filtered = new HashMap<>();
@@ -207,7 +207,7 @@
protected Map<Integer, LocalAssignment> readAssignments(Map<String, Assignment> assignmentsSnapshot) {
try {
- Map<Integer, LocalAssignment> portLA = new HashMap<>();
+ Map<Integer, LocalAssignment> portLocalAssignment = new HashMap<>();
for (Map.Entry<String, Assignment> assignEntry : assignmentsSnapshot.entrySet()) {
String topoId = assignEntry.getKey();
Assignment assignment = assignEntry.getValue();
@@ -220,16 +220,16 @@
LocalAssignment la = entry.getValue();
- if (!portLA.containsKey(port)) {
- portLA.put(port, la);
+ if (!portLocalAssignment.containsKey(port)) {
+ portLocalAssignment.put(port, la);
} else {
throw new RuntimeException("Should not have multiple topologies assigned to one port "
- + port + " " + la + " " + portLA);
+ + port + " " + la + " " + portLocalAssignment);
}
}
}
readRetry.set(0);
- return portLA;
+ return portLocalAssignment;
} catch (RuntimeException e) {
if (readRetry.get() > 2) {
throw e;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
index dae4826..b2681ea 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
@@ -60,7 +60,7 @@
List<String> commands = Arrays.asList("signal", String.valueOf(pid), String.valueOf(signal));
String user = getWorkerUser();
String logPrefix = "kill -" + signal + " " + pid;
- ClientSupervisorUtils.processLauncherAndWait(_conf, user, commands, null, logPrefix);
+ ClientSupervisorUtils.processLauncherAndWait(conf, user, commands, null, logPrefix);
}
@Override
@@ -81,15 +81,15 @@
LOG.info("Running as user: {} command: {}", user, command);
String containerFile = ServerUtils.containerFilePath(td);
if (Utils.checkFileExists(containerFile)) {
- SupervisorUtils.rmrAsUser(_conf, containerFile, containerFile);
+ SupervisorUtils.rmrAsUser(conf, containerFile, containerFile);
}
String scriptFile = ServerUtils.scriptFilePath(td);
if (Utils.checkFileExists(scriptFile)) {
- SupervisorUtils.rmrAsUser(_conf, scriptFile, scriptFile);
+ SupervisorUtils.rmrAsUser(conf, scriptFile, scriptFile);
}
String script = ServerUtils.writeScript(td, command, env);
List<String> args = Arrays.asList("profiler", td, script);
- int ret = ClientSupervisorUtils.processLauncherAndWait(_conf, user, args, env, logPrefix);
+ int ret = ClientSupervisorUtils.processLauncherAndWait(conf, user, args, env, logPrefix);
return ret == 0;
}
@@ -100,10 +100,10 @@
String user = this.getWorkerUser();
List<String> args = Arrays.asList("worker", workerDir, ServerUtils.writeScript(workerDir, command, env));
List<String> commandPrefix = null;
- if (_resourceIsolationManager != null) {
- commandPrefix = _resourceIsolationManager.getLaunchCommandPrefix(_workerId);
+ if (resourceIsolationManager != null) {
+ commandPrefix = resourceIsolationManager.getLaunchCommandPrefix(workerId);
}
- ClientSupervisorUtils.processLauncher(_conf, user, commandPrefix, args, null, logPrefix, processExitCallback, targetDir);
+ ClientSupervisorUtils.processLauncher(conf, user, commandPrefix, args, null, logPrefix, processExitCallback, targetDir);
}
/**
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
index 2e7aa9c..7706662 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
@@ -21,28 +21,28 @@
import org.apache.storm.utils.LocalState;
public class RunAsUserContainerLauncher extends ContainerLauncher {
- protected final ResourceIsolationInterface _resourceIsolationManager;
- private final Map<String, Object> _conf;
- private final String _supervisorId;
- private final int _supervisorPort;
+ protected final ResourceIsolationInterface resourceIsolationManager;
+ private final Map<String, Object> conf;
+ private final String supervisorId;
+ private final int supervisorPort;
private final StormMetricsRegistry metricsRegistry;
private final ContainerMemoryTracker containerMemoryTracker;
public RunAsUserContainerLauncher(Map<String, Object> conf, String supervisorId, int supervisorPort,
ResourceIsolationInterface resourceIsolationManager, StormMetricsRegistry metricsRegistry,
ContainerMemoryTracker containerMemoryTracker) throws IOException {
- _conf = conf;
- _supervisorId = supervisorId;
- _supervisorPort = supervisorPort;
- _resourceIsolationManager = resourceIsolationManager;
+ this.conf = conf;
+ this.supervisorId = supervisorId;
+ this.supervisorPort = supervisorPort;
+ this.resourceIsolationManager = resourceIsolationManager;
this.metricsRegistry = metricsRegistry;
this.containerMemoryTracker = containerMemoryTracker;
}
@Override
public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
- Container container = new RunAsUserContainer(ContainerType.LAUNCH, _conf, _supervisorId, _supervisorPort, port,
- assignment, _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker, null, null, null);
+ Container container = new RunAsUserContainer(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port,
+ assignment, resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker, null, null, null);
container.setup();
container.launch();
return container;
@@ -50,15 +50,15 @@
@Override
public Container recoverContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
- return new RunAsUserContainer(ContainerType.RECOVER_FULL, _conf, _supervisorId, _supervisorPort, port,
- assignment, _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker,
+ return new RunAsUserContainer(ContainerType.RECOVER_FULL, conf, supervisorId, supervisorPort, port,
+ assignment, resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker,
null, null, null);
}
@Override
public Killable recoverContainer(String workerId, LocalState localState) throws IOException {
- return new RunAsUserContainer(ContainerType.RECOVER_PARTIAL, _conf, _supervisorId, _supervisorPort, -1, null,
- _resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker,
+ return new RunAsUserContainer(ContainerType.RECOVER_PARTIAL, conf, supervisorId, supervisorPort, -1, null,
+ resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker,
null, null, null);
}
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
index 60d5e61..e04b080 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
@@ -12,8 +12,6 @@
package org.apache.storm.daemon.supervisor;
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.Timer;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
@@ -42,7 +40,6 @@
import org.apache.storm.localizer.BlobChangingCallback;
import org.apache.storm.localizer.GoodToGo;
import org.apache.storm.localizer.LocallyCachedBlob;
-import org.apache.storm.metric.StormMetricsRegistry;
import org.apache.storm.metricstore.WorkerMetricsProcessor;
import org.apache.storm.scheduler.ISupervisor;
import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
@@ -83,7 +80,7 @@
ContainerLauncher containerLauncher, String host,
int port, LocalState localState,
IStormClusterState clusterState,
- ISupervisor iSupervisor,
+ ISupervisor supervisor,
AtomicReference<Map<Long, LocalAssignment>> cachedCurrentAssignments,
OnlyLatestExecutor<Integer> metricsExec,
WorkerMetricsProcessor metricsProcessor,
@@ -100,7 +97,7 @@
containerLauncher,
host,
port,
- iSupervisor,
+ supervisor,
localState,
this,
metricsExec, metricsProcessor, slotMetrics);
@@ -179,27 +176,27 @@
/**
* Decide the equivalence of two local assignments, ignoring the order of executors
* This is different from #equal method.
- * @param a Local assignment A
- * @param b Local assignment B
+ * @param first Local assignment A
+ * @param second Local assignment B
* @return True if A and B are equivalent, ignoring the order of the executors
*/
@VisibleForTesting
- static boolean equivalent(LocalAssignment a, LocalAssignment b) {
- if (a == null && b == null) {
+ static boolean equivalent(LocalAssignment first, LocalAssignment second) {
+ if (first == null && second == null) {
return true;
}
- if (a != null && b != null) {
- if (a.get_topology_id().equals(b.get_topology_id())) {
- Set<ExecutorInfo> aexec = new HashSet<>(a.get_executors());
- Set<ExecutorInfo> bexec = new HashSet<>(b.get_executors());
+ if (first != null && second != null) {
+ if (first.get_topology_id().equals(second.get_topology_id())) {
+ Set<ExecutorInfo> aexec = new HashSet<>(first.get_executors());
+ Set<ExecutorInfo> bexec = new HashSet<>(second.get_executors());
if (aexec.equals(bexec)) {
- boolean aHasResources = a.is_set_resources();
- boolean bHasResources = b.is_set_resources();
- if (!aHasResources && !bHasResources) {
+ boolean firstHasResources = first.is_set_resources();
+ boolean secondHasResources = second.is_set_resources();
+ if (!firstHasResources && !secondHasResources) {
return true;
}
- if (aHasResources && bHasResources) {
- if (a.get_resources().equals(b.get_resources())) {
+ if (firstHasResources && secondHasResources) {
+ if (first.get_resources().equals(second.get_resources())) {
return true;
}
}
@@ -241,7 +238,8 @@
* @return the next state
* @throws IOException on any error
*/
- private static DynamicState prepareForNewAssignmentNoWorkersRunning(DynamicState dynamicState, StaticState staticState) throws IOException {
+ private static DynamicState prepareForNewAssignmentNoWorkersRunning(DynamicState dynamicState,
+ StaticState staticState) throws IOException {
assert (dynamicState.container == null);
assert dynamicState.currentAssignment == null;
@@ -264,7 +262,7 @@
Boolean isDead = dynamicState.container.areAllProcessesDead();
if (!isDead) {
if (reason == KillReason.ASSIGNMENT_CHANGED || reason == KillReason.BLOB_CHANGED) {
- staticState.iSupervisor.killedWorker(staticState.port);
+ staticState.supervisor.killedWorker(staticState.port);
}
dynamicState.container.kill();
}
@@ -332,7 +330,8 @@
/**
* Drop all of the changingBlobs and pendingChangingBlobs.
*
- * PRECONDITION: container is null
+ * <p>PRECONDITION: container is null
+ *
* @param dynamicState current state.
* @return the next state.
*/
@@ -356,8 +355,9 @@
* Informs the async localizer for all of blobs that the worker acknowledged the change of blobs.
* Worker has stop as of now.
*
- * PRECONDITION: container is null
+ * <p>PRECONDITION: container is null
* PRECONDITION: changingBlobs should only be for the given assignment.
+ *
* @param dynamicState the current state
* @return the futures for the current assignment.
*/
@@ -486,7 +486,7 @@
/**
* State Transitions for WAITING_FOR_BLOB_UPDATE state.
*
- * PRECONDITION: container is null
+ * <p>PRECONDITION: container is null
* PRECONDITION: pendingChangingBlobs is not empty (otherwise why did we go to this state)
* PRECONDITION: pendingChangingBlobsAssignment is not null.
*
@@ -560,9 +560,11 @@
if (dynamicState.container.areAllProcessesDead()) {
LOG.info("SLOT {} all processes are dead...", staticState.port);
- return cleanupCurrentContainer(dynamicState, staticState,
- dynamicState.pendingLocalization ==
- null ? MachineState.EMPTY : MachineState.WAITING_FOR_BLOB_LOCALIZATION);
+ return cleanupCurrentContainer(dynamicState,
+ staticState,
+ dynamicState.pendingLocalization == null
+ ? MachineState.EMPTY
+ : MachineState.WAITING_FOR_BLOB_LOCALIZATION);
}
LOG.warn("SLOT {} force kill and wait...", staticState.port);
@@ -836,7 +838,9 @@
* @param newAssignment the new assignment for this slot to run, null to run nothing
*/
public final void setNewAssignment(LocalAssignment newAssignment) {
- this.newAssignment.set(newAssignment == null ? null : new TimerDecoratedAssignment(newAssignment, staticState.slotMetrics.workerLaunchDuration));
+ this.newAssignment.set(newAssignment == null
+ ? null
+ : new TimerDecoratedAssignment(newAssignment, staticState.slotMetrics.workerLaunchDuration));
}
@Override
@@ -908,7 +912,6 @@
try {
while (!done) {
Set<TopoProfileAction> origProfileActions = new HashSet<>(profiling.get());
- Set<TopoProfileAction> removed = new HashSet<>(origProfileActions);
Set<BlobChanging> changingResourcesToHandle = dynamicState.changingBlobs;
if (!changingBlobs.isEmpty()) {
@@ -919,8 +922,8 @@
//Remove/Clean up changed requests that are not for us
while (it.hasNext()) {
BlobChanging rc = it.next();
- if (!forSameTopology(rc.assignment, dynamicState.currentAssignment) &&
- !forSameTopology(rc.assignment, dynamicState.newAssignment)) {
+ if (!forSameTopology(rc.assignment, dynamicState.currentAssignment)
+ && !forSameTopology(rc.assignment, dynamicState.newAssignment)) {
rc.latch.countDown(); //Ignore the future
it.remove();
}
@@ -936,16 +939,20 @@
LOG.info("STATE {} -> {}", dynamicState, nextState);
}
//Save the current state for recovery
- if ((nextState.currentAssignment != null && !nextState.currentAssignment.equals(dynamicState.currentAssignment)) ||
- (dynamicState.currentAssignment != null && !dynamicState.currentAssignment.equals(nextState.currentAssignment))) {
+ if ((nextState.currentAssignment != null
+ && !nextState.currentAssignment.equals(dynamicState.currentAssignment))
+ || (dynamicState.currentAssignment != null
+ && !dynamicState.currentAssignment.equals(nextState.currentAssignment))) {
LOG.info("SLOT {}: Changing current assignment from {} to {}", staticState.port, dynamicState.currentAssignment,
nextState.currentAssignment);
saveNewAssignment(nextState.currentAssignment);
}
if (equivalent(nextState.newAssignment, nextState.currentAssignment)
- && nextState.currentAssignment != null && nextState.currentAssignment.get_owner() == null
- && nextState.newAssignment != null && nextState.newAssignment.get_owner() != null) {
+ && nextState.currentAssignment != null
+ && nextState.currentAssignment.get_owner() == null
+ && nextState.newAssignment != null
+ && nextState.newAssignment.get_owner() != null) {
//This is an odd case for a rolling upgrade where the user on the old assignment may be null,
// but not on the new one. Although in all other ways they are the same.
// If this happens we want to use the assignment with the owner.
@@ -955,6 +962,7 @@
}
// clean up the profiler actions that are not being processed
+ Set<TopoProfileAction> removed = new HashSet<>(origProfileActions);
removed.removeAll(dynamicState.profileActions);
removed.removeAll(dynamicState.pendingStopProfileActions);
for (TopoProfileAction action : removed) {
@@ -964,7 +972,8 @@
LOG.error("Error trying to remove profiling request, it will be retried", e);
}
}
- Set<TopoProfileAction> orig, copy;
+ Set<TopoProfileAction> orig;
+ Set<TopoProfileAction> copy;
do {
orig = profiling.get();
copy = new HashSet<>(orig);
@@ -1036,7 +1045,7 @@
public final ContainerLauncher containerLauncher;
public final int port;
public final String host;
- public final ISupervisor iSupervisor;
+ public final ISupervisor supervisor;
public final LocalState localState;
public final BlobChangingCallback changingCallback;
public final OnlyLatestExecutor<Integer> metricsExec;
@@ -1046,7 +1055,7 @@
StaticState(AsyncLocalizer localizer, long hbTimeoutMs, long firstHbTimeoutMs,
long killSleepMs, long monitorFreqMs,
ContainerLauncher containerLauncher, String host, int port,
- ISupervisor iSupervisor, LocalState localState,
+ ISupervisor supervisor, LocalState localState,
BlobChangingCallback changingCallback,
OnlyLatestExecutor<Integer> metricsExec,
WorkerMetricsProcessor metricsProcessor,
@@ -1059,7 +1068,7 @@
this.monitorFreqMs = monitorFreqMs;
this.host = host;
this.port = port;
- this.iSupervisor = iSupervisor;
+ this.supervisor = supervisor;
this.localState = localState;
this.changingCallback = changingCallback;
this.metricsExec = metricsExec;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
index 9bc4f7f..259a13e 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
@@ -84,6 +84,7 @@
private final Map<String, Object> conf;
private final IContext sharedContext;
private final IAuthorizer authorizationHandler;
+ @SuppressWarnings("checkstyle:MemberName")
private final ISupervisor iSupervisor;
private final Utils.UptimeComputer upTime;
private final String stormVersion;
@@ -115,6 +116,7 @@
//Passed to workers in local clusters, exposed by thrift server in distributed mode
private org.apache.storm.generated.Supervisor.Iface supervisorThriftInterface;
+ @SuppressWarnings("checkstyle:ParameterName")
private Supervisor(ISupervisor iSupervisor, StormMetricsRegistry metricsRegistry)
throws IOException, IllegalAccessException, InstantiationException, ClassNotFoundException {
this(ConfigUtils.readStormConfig(), null, iSupervisor, metricsRegistry);
@@ -126,8 +128,8 @@
* @param conf config
* @param sharedContext {@link IContext}
* @param iSupervisor {@link ISupervisor}
- * @throws IOException
*/
+ @SuppressWarnings("checkstyle:ParameterName")
public Supervisor(Map<String, Object> conf, IContext sharedContext, ISupervisor iSupervisor, StormMetricsRegistry metricsRegistry)
throws IOException, IllegalAccessException, ClassNotFoundException, InstantiationException {
this.conf = conf;
@@ -186,8 +188,6 @@
/**
* supervisor daemon enter entrance.
- *
- * @param args
*/
public static void main(String[] args) throws Exception {
Utils.setupDefaultUncaughtExceptionHandler();
@@ -367,7 +367,6 @@
@VisibleForTesting
public void checkAuthorization(String topoName, Map<String, Object> topoConf, String operation, ReqContext context)
throws AuthorizationException {
- IAuthorizer aclHandler = authorizationHandler;
if (context == null) {
context = ReqContext.context();
}
@@ -384,6 +383,7 @@
throw new WrappedAuthorizationException("Supervisor does not support impersonation");
}
+ IAuthorizer aclHandler = authorizationHandler;
if (aclHandler != null) {
if (!aclHandler.permit(context, operation, checkConf)) {
ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(),
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
index 4619aeb..a0d0397 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
@@ -64,10 +64,7 @@
/**
* Given the blob information returns the value of the uncompress field, handling it either being a string or a boolean value, or if
- * it's not specified then returns false
- *
- * @param blobInfo
- * @return
+ * it's not specified then returns false.
*/
public static boolean shouldUncompressBlob(Map<String, Object> blobInfo) {
return ObjectReader.getBoolean(blobInfo.get("uncompress"), false);
@@ -85,10 +82,7 @@
}
/**
- * Returns a list of LocalResources based on the blobstore-map passed in
- *
- * @param blobstoreMap
- * @return
+ * Returns a list of LocalResources based on the blobstore-map passed in.
*/
public static List<LocalResource> blobstoreMapToLocalresources(Map<String, Map<String, Object>> blobstoreMap) {
List<LocalResource> localResourceList = new ArrayList<>();
@@ -109,10 +103,7 @@
}
/**
- * map from worker id to heartbeat
- *
- * @param conf
- * @return
+ * Map from worker id to heartbeat.
*
*/
public static Map<String, LSWorkerHeartbeat> readWorkerHeartbeats(Map<String, Object> conf) {
@@ -120,11 +111,7 @@
}
/**
- * get worker heartbeat by workerId.
- *
- * @param conf
- * @param workerId
- * @return
+ * Get worker heartbeat by workerId.
*/
private static LSWorkerHeartbeat readWorkerHeartbeat(Map<String, Object> conf, String workerId) {
return _instance.readWorkerHeartbeatImpl(conf, workerId);
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java b/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
index fc80b6b..95ab8ae 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
@@ -139,43 +139,43 @@
@VisibleForTesting
LocallyCachedBlob getTopoJar(final String topologyId, String owner) {
return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormJarKey(topologyId),
- (tjk) -> {
- try {
- return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
- LocallyCachedTopologyBlob.TopologyBlobType
- .TOPO_JAR, owner, metricsRegistry);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- });
+ (tjk) -> {
+ try {
+ return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+ LocallyCachedTopologyBlob.TopologyBlobType
+ .TOPO_JAR, owner, metricsRegistry);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
@VisibleForTesting
LocallyCachedBlob getTopoCode(final String topologyId, String owner) {
return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormCodeKey(topologyId),
- (tck) -> {
- try {
- return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
- LocallyCachedTopologyBlob.TopologyBlobType
- .TOPO_CODE, owner, metricsRegistry);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- });
+ (tck) -> {
+ try {
+ return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+ LocallyCachedTopologyBlob.TopologyBlobType
+ .TOPO_CODE, owner, metricsRegistry);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
@VisibleForTesting
LocallyCachedBlob getTopoConf(final String topologyId, String owner) {
return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormConfKey(topologyId),
- (tck) -> {
- try {
- return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
- LocallyCachedTopologyBlob.TopologyBlobType
- .TOPO_CONF, owner, metricsRegistry);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- });
+ (tck) -> {
+ try {
+ return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+ LocallyCachedTopologyBlob.TopologyBlobType
+ .TOPO_CONF, owner, metricsRegistry);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
private LocalizedResource getUserArchive(String user, String key) {
@@ -209,22 +209,22 @@
CompletableFuture<Void> baseBlobs = requestDownloadBaseTopologyBlobs(pna, cb);
return baseBlobs.thenComposeAsync((v) ->
- blobPending.compute(topologyId, (tid, old) -> {
- CompletableFuture<Void> ret = old;
- if (ret == null) {
- ret = CompletableFuture.supplyAsync(new DownloadBlobs(pna, cb), execService);
- } else {
- try {
- addReferencesToBlobs(pna, cb);
- } catch (Exception e) {
- throw new RuntimeException(e);
- } finally {
- pna.complete();
- }
- }
- LOG.debug("Reserved blobs {} {}", topologyId, ret);
- return ret;
- }));
+ blobPending.compute(topologyId, (tid, old) -> {
+ CompletableFuture<Void> ret = old;
+ if (ret == null) {
+ ret = CompletableFuture.supplyAsync(new DownloadBlobs(pna, cb), execService);
+ } else {
+ try {
+ addReferencesToBlobs(pna, cb);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ pna.complete();
+ }
+ }
+ LOG.debug("Reserved blobs {} {}", topologyId, ret);
+ return ret;
+ }));
}
@VisibleForTesting
@@ -241,7 +241,7 @@
topoConf.addReference(pna, cb);
return topologyBasicDownloaded.computeIfAbsent(topologyId,
- (tid) -> downloadOrUpdate(topoJar, topoCode, topoConf));
+ (tid) -> downloadOrUpdate(topoJar, topoCode, topoConf));
}
private CompletableFuture<Void> downloadOrUpdate(LocallyCachedBlob... blobs) {
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java b/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
index ebbbdc5..2ad96ce 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
@@ -29,7 +29,7 @@
* Informs the listener that a blob has changed and is ready to update and replace a localized blob that has been marked as tied to the
* life cycle of the worker process.
*
- * If `go.getLatch()` is never called before the method completes it is assumed that the listener is good with the blob changing.
+ * <p>If `go.getLatch()` is never called before the method completes it is assumed that the listener is good with the blob changing.
*
* @param assignment the assignment this resource and callback are registered with.
* @param port the port that this resource and callback are registered with.
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java b/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
index 9217388..5050e86 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
@@ -29,6 +29,7 @@
public class GoodToGo {
private final GoodToGoLatch latch;
private boolean gotLatch = false;
+
public GoodToGo(CountDownLatch latch, Future<Void> doneChanging) {
this.latch = new GoodToGoLatch(latch, doneChanging);
}
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java b/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
index e8d8171..0a732b7 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
@@ -17,6 +17,8 @@
import java.io.IOException;
@FunctionalInterface
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public interface IOFunction<T, R> {
+
R apply(T t) throws IOException;
}
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
index ea3bdac..87bd970 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
@@ -220,7 +220,7 @@
private void setSize() {
// we trust that the file exists
Path withVersion = getFilePathWithVersion();
- size = ServerUtils.getDU(withVersion.toFile());
+ size = ServerUtils.getDiskUsage(withVersion.toFile());
LOG.debug("size of {} is: {}", withVersion, size);
}
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
index 25d6f4d..ddb59c8 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
@@ -138,6 +138,7 @@
return "Cache: " + currentSize;
}
+ @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
static class LRUComparator implements Comparator<LocallyCachedBlob> {
@Override
public int compare(LocallyCachedBlob r1, LocallyCachedBlob r2) {
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
index 3b019f4..4ba41f9 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
@@ -258,11 +258,12 @@
private void cleanUpTemp(String baseName) throws IOException {
LOG.debug("Cleaning up temporary data in {}", topologyBasicBlobsRootDir);
try (DirectoryStream<Path> children = fsOps.newDirectoryStream(topologyBasicBlobsRootDir,
- (p) -> {
- String fileName = p.getFileName().toString();
- Matcher m = EXTRACT_BASE_NAME_AND_VERSION.matcher(fileName);
- return m.matches() && baseName.equals(m.group(1));
- })) {
+ (p) -> {
+ String fileName = p.getFileName().toString();
+ Matcher m = EXTRACT_BASE_NAME_AND_VERSION.matcher(fileName);
+ return m.matches() && baseName.equals(m.group(1));
+ })
+ ) {
//children is only ever null if topologyBasicBlobsRootDir does not exist. This happens during unit tests
// And because a non-existant directory is by definition clean we are ignoring it.
if (children != null) {
diff --git a/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java b/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
index cfeb63c..6f2cd30 100644
--- a/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
+++ b/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
@@ -21,9 +21,9 @@
public class ClusterMetricsConsumerExecutor {
public static final Logger LOG = LoggerFactory.getLogger(ClusterMetricsConsumerExecutor.class);
private static final String ERROR_MESSAGE_PREPARATION_CLUSTER_METRICS_CONSUMER_FAILED =
- "Preparation of Cluster Metrics Consumer failed. " +
- "Please check your configuration and/or corresponding systems and relaunch Nimbus. " +
- "Skipping handle metrics.";
+ "Preparation of Cluster Metrics Consumer failed. "
+ + "Please check your configuration and/or corresponding systems and relaunch Nimbus. "
+ + "Skipping handle metrics.";
private IClusterMetricsConsumer metricsConsumer;
private String consumerClassName;
@@ -39,8 +39,9 @@
metricsConsumer = (IClusterMetricsConsumer) Class.forName(consumerClassName).newInstance();
metricsConsumer.prepare(registrationArgument);
} catch (Exception e) {
- LOG.error("Could not instantiate or prepare Cluster Metrics Consumer with fully qualified name " +
- consumerClassName, e);
+ LOG.error("Could not instantiate or prepare Cluster Metrics Consumer with fully qualified name "
+ + consumerClassName,
+ e);
if (metricsConsumer != null) {
metricsConsumer.cleanup();
diff --git a/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java b/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
index f07dc54..0e296fb 100644
--- a/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
+++ b/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
@@ -19,20 +19,18 @@
import org.slf4j.LoggerFactory;
/**
- * Listens for cluster related metrics, dumps them to log
+ * Listens for cluster related metrics, dumps them to log.
*
- * To use, edit the storm.yaml config file:
- *
+ * <p>To use, edit the storm.yaml config file:
* ```yaml
* storm.cluster.metrics.register:
* - class: "org.apache.storm.metrics.LoggingClusterMetricsConsumer"
* ```
- *
*/
public class LoggingClusterMetricsConsumer implements IClusterMetricsConsumer {
public static final Logger LOG = LoggerFactory.getLogger(LoggingClusterMetricsConsumer.class);
- static private String padding = " ";
+ private static String padding = " ";
@Override
public void prepare(Object registrationArgument) {
@@ -58,8 +56,8 @@
for (DataPoint p : dataPoints) {
sb.delete(header.length(), sb.length());
sb.append(p.getName())
- .append(padding).delete(header.length() + 23, sb.length()).append("\t")
- .append(p.getValue());
+ .append(padding).delete(header.length() + 23, sb.length()).append("\t")
+ .append(p.getValue());
LOG.info(sb.toString());
}
}
@@ -72,8 +70,10 @@
for (DataPoint p : dataPoints) {
sb.delete(header.length(), sb.length());
sb.append(p.getName())
- .append(padding).delete(header.length() + 23, sb.length()).append("\t")
- .append(p.getValue());
+ .append(padding)
+ .delete(header.length() + 23, sb.length())
+ .append("\t")
+ .append(p.getValue());
LOG.info(sb.toString());
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
index fae3865..2c01423 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
@@ -184,8 +184,7 @@
// attempt to find the string in the database
try {
stringMetadata = store.rocksDbGetStringMetadata(type, s);
- }
- catch (RocksDBException e) {
+ } catch (RocksDBException e) {
throw new MetricException("Error reading metrics data", e);
}
if (stringMetadata != null) {
@@ -244,8 +243,7 @@
unusedIds.remove(key.getMetadataStringId());
return true; // process all metadata
});
- }
- catch (RocksDBException e) {
+ } catch (RocksDBException e) {
throw new MetricException("Error reading metrics data", e);
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
index ba3f08b..89e4cec 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
@@ -276,8 +276,7 @@
// attempt to find the string in the database
try {
stringMetadata = rocksDbGetStringMetadata(type, s);
- }
- catch (RocksDBException e) {
+ } catch (RocksDBException e) {
throw new MetricException("Error reading metric data", e);
}
@@ -525,18 +524,15 @@
// callback to caller
scanCallback.cb(metric);
- }
- catch (MetricException e) {
+ } catch (MetricException e) {
LOG.warn("Failed to report found metric: {}", e.getMessage());
}
- }
- else {
+ } else {
try {
if (!rawCallback.cb(key, val)) {
return;
}
- }
- catch (RocksDBException e) {
+ } catch (RocksDBException e) {
throw new MetricException("Error reading metrics data", e);
}
}
@@ -627,8 +623,7 @@
}
return true;
});
- }
- catch (RocksDBException e) {
+ } catch (RocksDBException e) {
throw new MetricException("Error reading metric data", e);
}
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
index bf90c69..e8428c3 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
@@ -109,10 +109,10 @@
/**
* Add the string metadata to the cache.
*
- * NOTE: this can cause data to be evicted from the cache when full. When this occurs, the evictionCallback() method
+ * <p>NOTE: this can cause data to be evicted from the cache when full. When this occurs, the evictionCallback() method
* is called to store the metadata back into the RocksDB database.
*
- * This method is only exposed to the WritableStringMetadataCache interface.
+ * <p>This method is only exposed to the WritableStringMetadataCache interface.
*
* @param s The string to add
* @param stringMetadata The string's metadata
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
index fc5f0b1..d31fe9f 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
@@ -24,10 +24,10 @@
/**
* Add the string metadata to the cache.
*
- * * NOTE: this can cause data to be evicted from the cache when full. When this occurs, the evictionCallback() method
+ * <p>NOTE: this can cause data to be evicted from the cache when full. When this occurs, the evictionCallback() method
* is called to store the metadata back into the RocksDB database.
*
- * This method is only exposed to the WritableStringMetadataCache interface.
+ * <p>This method is only exposed to the WritableStringMetadataCache interface.
*
* @param s The string to add
* @param stringMetadata The string's metadata
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java b/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
index 05cb1df..4f84997 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
@@ -186,7 +186,6 @@
* Get an assignments from the target queue with the specific index.
* @param queueIndex index of the queue
* @return an {@link NodeAssignments}
- * @throws InterruptedException
*/
public NodeAssignments nextAssignments(Integer queueIndex) throws InterruptedException {
NodeAssignments target = null;
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java b/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
index 7fef7bf..c6bb208 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
@@ -24,7 +24,7 @@
private static final Logger LOG = LoggerFactory.getLogger(DefaultTopologyValidator.class);
@Override
- public void prepare(Map<String, Object> StormConf) {
+ public void prepare(Map<String, Object> stormConf) {
}
@Override
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
index 8247a85..1b5a6fb 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
@@ -18,21 +18,19 @@
* A plugin interface that gets invoked any time there is an action for a topology.
*/
public interface ITopologyActionNotifierPlugin {
+
/**
* Called once during nimbus initialization.
- * @param StormConf
*/
- void prepare(Map<String, Object> StormConf);
+ void prepare(Map<String, Object> stormConf);
/**
* When a new actions is executed for a topology, this method will be called.
- * @param topologyName
- * @param action
*/
void notify(String topologyName, String action);
/**
- * called during shutdown.
+ * Called during shutdown.
*/
void cleanup();
}
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
index 88d756d..ff17192 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
@@ -18,7 +18,7 @@
public interface ITopologyValidator {
- void prepare(Map<String, Object> StormConf);
+ void prepare(Map<String, Object> stormConf);
void validate(String topologyName, Map<String, Object> topologyConf, StormTopology topology)
throws InvalidTopologyException;
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java b/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
index bab42ce..08c7e99 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
@@ -12,6 +12,8 @@
package org.apache.storm.nimbus;
+import com.codahale.metrics.Meter;
+
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
@@ -20,7 +22,6 @@
import java.util.TreeSet;
import javax.security.auth.Subject;
-import com.codahale.metrics.Meter;
import org.apache.commons.io.IOUtils;
import org.apache.storm.Config;
import org.apache.storm.DaemonConfig;
@@ -90,7 +91,7 @@
this.numLostLeader = metricsRegistry.registerMeter("nimbus:num-lost-leadership");
//Since we only give up leadership if we're waiting for blobs to sync,
//it makes sense to wait a full sync cycle before trying for leadership again.
- this.requeueDelayMs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS))*1000;
+ this.requeueDelayMs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS)) * 1000;
}
/**
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java b/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
index ac01aae..a8ec26a 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
@@ -12,6 +12,8 @@
package org.apache.storm.nimbus;
+import static java.util.stream.Collectors.toSet;
+
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@@ -21,8 +23,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static java.util.stream.Collectors.toSet;
-
/**
* Wait for a node to report worker heartbeats until a configured timeout. For cases below we have strategies:
*
diff --git a/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java b/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
index 1543c64..d25529f 100644
--- a/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
+++ b/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
@@ -48,13 +48,13 @@
private String secret;
private final String topologyName;
private volatile ChannelGroup allChannels = new DefaultChannelGroup("storm-server", GlobalEventExecutor.INSTANCE);
- private final ChannelGroup authenticated_channels = new DefaultChannelGroup("authenticated-pacemaker-channels", GlobalEventExecutor.INSTANCE);
+ private final ChannelGroup authenticatedChannels = new DefaultChannelGroup("authenticated-pacemaker-channels",
+ GlobalEventExecutor.INSTANCE);
private final ThriftNettyServerCodec.AuthMethod authMethod;
private final EventLoopGroup bossEventLoopGroup;
private final EventLoopGroup workerEventLoopGroup;
public PacemakerServer(IServerMessageHandler handler, Map<String, Object> config) {
- int maxWorkers = (int) config.get(DaemonConfig.PACEMAKER_MAX_THREADS);
int port = (int) config.get(Config.PACEMAKER_PORT);
this.handler = handler;
this.topologyName = "pacemaker_server";
@@ -63,9 +63,9 @@
switch (auth) {
case "DIGEST":
- Configuration login_conf = ClientAuthUtils.getConfiguration(config);
+ Configuration loginConf = ClientAuthUtils.getConfiguration(config);
authMethod = ThriftNettyServerCodec.AuthMethod.DIGEST;
- this.secret = ClientAuthUtils.makeDigestPayload(login_conf, ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_DIGEST);
+ this.secret = ClientAuthUtils.makeDigestPayload(loginConf, ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_DIGEST);
if (this.secret == null) {
LOG.error("Can't start pacemaker server without digest secret.");
throw new RuntimeException("Can't start pacemaker server without digest secret.");
@@ -89,6 +89,7 @@
ThreadFactory workerFactory = new NettyRenameThreadFactory("server-worker");
this.bossEventLoopGroup = new NioEventLoopGroup(1, bossFactory);
// 0 means DEFAULT_EVENT_LOOP_THREADS
+ int maxWorkers = (int) config.get(DaemonConfig.PACEMAKER_MAX_THREADS);
// https://github.com/netty/netty/blob/netty-4.1.24.Final/transport/src/main/java/io/netty/channel/MultithreadEventLoopGroup.java#L40
this.workerEventLoopGroup = new NioEventLoopGroup(maxWorkers > 0 ? maxWorkers : 0, workerFactory);
@@ -121,7 +122,7 @@
}
public void cleanPipeline(Channel channel) {
- boolean authenticated = authenticated_channels.contains(channel);
+ boolean authenticated = authenticatedChannels.contains(channel);
if (!authenticated) {
if (channel.pipeline().get(ThriftNettyServerCodec.SASL_HANDLER) != null) {
channel.pipeline().remove(ThriftNettyServerCodec.SASL_HANDLER);
@@ -135,7 +136,7 @@
public void received(Object mesg, String remote, Channel channel) throws InterruptedException {
cleanPipeline(channel);
- boolean authenticated = (authMethod == ThriftNettyServerCodec.AuthMethod.NONE) || authenticated_channels.contains(channel);
+ boolean authenticated = (authMethod == ThriftNettyServerCodec.AuthMethod.NONE) || authenticatedChannels.contains(channel);
HBMessage m = (HBMessage) mesg;
LOG.debug("received message. Passing to handler. {} : {} : {}",
handler.toString(), m.toString(), channel.toString());
@@ -161,6 +162,6 @@
@Override
public void authenticated(Channel c) {
LOG.debug("Pacemaker server authenticated channel: {}", c.toString());
- authenticated_channels.add(c);
+ authenticatedChannels.add(c);
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java b/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
index 45babf9..d15f369 100644
--- a/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
+++ b/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
@@ -53,32 +53,32 @@
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
- pipeline.addLast("encoder", new ThriftEncoder());
+ pipeline.addLast("encoder", new ThriftEncoder());
pipeline.addLast("decoder", new ThriftDecoder(thriftMessageMaxSizeBytes));
- if (authMethod == AuthMethod.DIGEST) {
- try {
- LOG.debug("Adding SaslStormServerHandler to pacemaker server pipeline.");
- pipeline.addLast(SASL_HANDLER, new SaslStormServerHandler((ISaslServer) server));
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- } else if (authMethod == AuthMethod.KERBEROS) {
- try {
- LOG.debug("Adding KerberosSaslServerHandler to pacemaker server pipeline.");
+ if (authMethod == AuthMethod.DIGEST) {
+ try {
+ LOG.debug("Adding SaslStormServerHandler to pacemaker server pipeline.");
+ pipeline.addLast(SASL_HANDLER, new SaslStormServerHandler((ISaslServer) server));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ } else if (authMethod == AuthMethod.KERBEROS) {
+ try {
+ LOG.debug("Adding KerberosSaslServerHandler to pacemaker server pipeline.");
ArrayList<String> authorizedUsers = new ArrayList<>(1);
- authorizedUsers.add((String) topoConf.get(DaemonConfig.NIMBUS_DAEMON_USER));
- pipeline.addLast(KERBEROS_HANDLER, new KerberosSaslServerHandler((ISaslServer) server,
- topoConf,
- ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_SERVER,
- authorizedUsers));
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- } else if (authMethod == AuthMethod.NONE) {
- LOG.debug("Not authenticating any clients. AuthMethod is NONE");
- }
+ authorizedUsers.add((String) topoConf.get(DaemonConfig.NIMBUS_DAEMON_USER));
+ pipeline.addLast(KERBEROS_HANDLER, new KerberosSaslServerHandler((ISaslServer) server,
+ topoConf,
+ ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_SERVER,
+ authorizedUsers));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ } else if (authMethod == AuthMethod.NONE) {
+ LOG.debug("Not authenticating any clients. AuthMethod is NONE");
+ }
- pipeline.addLast("handler", new StormServerHandler(server));
+ pipeline.addLast("handler", new StormServerHandler(server));
}
public enum AuthMethod {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java b/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
index 5e1608a..756eb79 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
@@ -1034,8 +1034,8 @@
totalScheduledResources.add(req);
}
// shared off heap node memory
- for (Double offHeapNodeMemory : nodeToScheduledOffHeapNodeMemoryCache.
- computeIfAbsent(nid, Cluster::makeMap).values()) {
+ for (Double offHeapNodeMemory
+ : nodeToScheduledOffHeapNodeMemoryCache.computeIfAbsent(nid, Cluster::makeMap).values()) {
totalScheduledResources.addOffHeap(offHeapNodeMemory);
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java b/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
index 08d1229..7cb1dd6 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
@@ -16,6 +16,7 @@
import java.util.Map;
public interface ISupervisor {
+
void prepare(Map<String, Object> topoConf, String schedulerLocalDir);
// for mesos, this is {hostname}-{topologyid}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
index 03c0c6a..7b60ffd 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
@@ -40,7 +40,7 @@
// blacklist the good hosts and remove those workers from the list of need to be assigned workers
// otherwise unassign all other workers for isolated topologies if assigned
public class IsolationScheduler implements IScheduler {
- private final static Logger LOG = LoggerFactory.getLogger(IsolationScheduler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(IsolationScheduler.class);
private Map<String, Number> isoMachines;
@@ -65,7 +65,6 @@
// set blacklist to what it was initially
@Override
public void schedule(Topologies topologies, Cluster cluster) {
- Set<String> origBlacklist = cluster.getBlacklistedHosts();
List<TopologyDetails> isoTopologies = isolatedTopologies(topologies.getTopologies());
Set<String> isoIds = extractTopologyIds(isoTopologies);
Map<String, Set<Set<ExecutorDetails>>> topologyWorkerSpecs = topologyWorkerSpecs(isoTopologies);
@@ -139,6 +138,7 @@
Topologies leftOverTopologies = leftoverTopologies(topologies, allocatedTopologies);
DefaultScheduler.defaultSchedule(leftOverTopologies, cluster);
}
+ Set<String> origBlacklist = cluster.getBlacklistedHosts();
cluster.setBlacklistedHosts(origBlacklist);
}
@@ -218,8 +218,8 @@
List<ExecutorDetails> allExecutors = new ArrayList<ExecutorDetails>();
Collection<List<ExecutorDetails>> values = compExecutors.values();
- for (List<ExecutorDetails> eList : values) {
- allExecutors.addAll(eList);
+ for (List<ExecutorDetails> value : values) {
+ allExecutors.addAll(value);
}
int numWorkers = topology.getNumWorkers();
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java b/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
index b222916..d135afb 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
@@ -41,9 +41,9 @@
}
/**
- * Create a new Topologies from a map of id to topology
+ * Create a new Topologies from a map of id to topology.
*
- * @param topologies a map of topology id to topology details.
+ * @param topologies a map of topology id to topology details
*/
public Topologies(Map<String, TopologyDetails> topologies) {
if (topologies == null) {
@@ -81,10 +81,10 @@
}
/**
- * Get a topology given an ID
+ * Get a topology given an ID.
*
* @param topologyId the id of the topology to get
- * @return the topology or null if it is not found.
+ * @return the topology or null if it is not found
*/
public TopologyDetails getById(String topologyId) {
return topologies.get(topologyId);
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java b/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
index 757f256..660f3d8 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
@@ -352,8 +352,7 @@
/**
* Get the total CPU requirement for executor.
*
- * @param exec
- * @return Map<String , Double> generic resource mapping requirement for the executor
+ * @return generic resource mapping requirement for the executor
*/
public Double getTotalCpuReqTask(ExecutorDetails exec) {
if (hasExecInTopo(exec)) {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
index 33c0e04..0527335 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
@@ -91,7 +91,9 @@
badSupervisorsToleranceSlidingWindow = EvictingQueue.create(windowSize);
cachedSupervisors = new HashMap<>();
blacklistedSupervisorIds = new HashSet<>();
- blacklistOnBadSlots = ObjectReader.getBoolean(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT), true);
+ blacklistOnBadSlots = ObjectReader.getBoolean(
+ this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT),
+ true);
//nimbus:num-blacklisted-supervisor + non-blacklisted supervisor = nimbus:num-supervisors
metricsRegistry.registerGauge("nimbus:num-blacklisted-supervisor", () -> blacklistedSupervisorIds.size());
@@ -263,4 +265,4 @@
public Set<String> getBlacklistSupervisorIds() {
return Collections.unmodifiableSet(blacklistedSupervisorIds);
}
-}
\ No newline at end of file
+}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
index 574fe91..4273f79 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
@@ -54,7 +54,7 @@
if (cluster.needsSchedulingRas(td)) {
int slots = 0;
try {
- slots = ServerUtils.getEstimatedWorkerCountForRASTopo(td.getConf(), td.getTopology());
+ slots = ServerUtils.getEstimatedWorkerCountForRasTopo(td.getConf(), td.getTopology());
} catch (InvalidTopologyException e) {
LOG.warn("Could not guess the number of slots needed for {}", td.getName(), e);
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
index 1b6ac91..05ccfb7 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
@@ -25,23 +25,23 @@
import org.slf4j.LoggerFactory;
/**
- * A pool of machines that anyone can use, but topologies are not isolated
+ * A pool of machines that anyone can use, but topologies are not isolated.
*/
public class DefaultPool extends NodePool {
private static final Logger LOG = LoggerFactory.getLogger(DefaultPool.class);
- private Set<Node> _nodes = new HashSet<>();
- private HashMap<String, TopologyDetails> _tds = new HashMap<>();
+ private Set<Node> nodes = new HashSet<>();
+ private HashMap<String, TopologyDetails> tds = new HashMap<>();
@Override
public void addTopology(TopologyDetails td) {
String topId = td.getId();
LOG.debug("Adding in Topology {}", topId);
- _tds.put(topId, td);
- SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+ tds.put(topId, td);
+ SchedulerAssignment assignment = cluster.getAssignmentById(topId);
if (assignment != null) {
for (WorkerSlot ws : assignment.getSlots()) {
- Node n = _nodeIdToNode.get(ws.getNodeId());
- _nodes.add(n);
+ Node n = nodeIdToNode.get(ws.getNodeId());
+ nodes.add(n);
}
}
}
@@ -54,15 +54,15 @@
@Override
public Collection<Node> takeNodes(int nodesNeeded) {
HashSet<Node> ret = new HashSet<>();
- LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+ LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
for (Node n : sortedNodes) {
if (nodesNeeded <= ret.size()) {
break;
}
if (n.isAlive()) {
- n.freeAllSlots(_cluster);
- _nodes.remove(n);
+ n.freeAllSlots(cluster);
+ nodes.remove(n);
ret.add(n);
}
}
@@ -72,22 +72,24 @@
@Override
public int nodesAvailable() {
int total = 0;
- for (Node n : _nodes) {
- if (n.isAlive()) total++;
+ for (Node n : nodes) {
+ if (n.isAlive()) {
+ total++;
+ }
}
return total;
}
@Override
public int slotsAvailable() {
- return Node.countTotalSlotsAlive(_nodes);
+ return Node.countTotalSlotsAlive(nodes);
}
@Override
public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
int nodesFound = 0;
int slotsFound = 0;
- LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+ LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
for (Node n : sortedNodes) {
if (slotsNeeded <= 0) {
@@ -106,15 +108,15 @@
@Override
public Collection<Node> takeNodesBySlots(int slotsNeeded) {
HashSet<Node> ret = new HashSet<>();
- LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+ LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
for (Node n : sortedNodes) {
if (slotsNeeded <= 0) {
break;
}
if (n.isAlive()) {
- n.freeAllSlots(_cluster);
- _nodes.remove(n);
+ n.freeAllSlots(cluster);
+ nodes.remove(n);
ret.add(n);
slotsNeeded -= n.totalSlotsFree();
}
@@ -124,35 +126,38 @@
@Override
public void scheduleAsNeeded(NodePool... lesserPools) {
- for (TopologyDetails td : _tds.values()) {
+ for (TopologyDetails td : tds.values()) {
String topId = td.getId();
- if (_cluster.needsScheduling(td)) {
+ if (cluster.needsScheduling(td)) {
LOG.debug("Scheduling topology {}", topId);
int totalTasks = td.getExecutors().size();
int origRequest = td.getNumWorkers();
int slotsRequested = Math.min(totalTasks, origRequest);
- int slotsUsed = Node.countSlotsUsed(topId, _nodes);
- int slotsFree = Node.countFreeSlotsAlive(_nodes);
+ int slotsUsed = Node.countSlotsUsed(topId, nodes);
+ int slotsFree = Node.countFreeSlotsAlive(nodes);
//Check to see if we have enough slots before trying to get them
int slotsAvailable = 0;
if (slotsRequested > slotsFree) {
slotsAvailable = NodePool.slotsAvailable(lesserPools);
}
int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
- int executorsNotRunning = _cluster.getUnassignedExecutors(td).size();
+ int executorsNotRunning = cluster.getUnassignedExecutors(td).size();
LOG.debug("Slots... requested {} used {} free {} available {} to be used {}, executors not running {}",
slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse, executorsNotRunning);
if (slotsToUse <= 0) {
if (executorsNotRunning > 0) {
- _cluster.setStatus(topId, "Not fully scheduled (No free slots in default pool) " + executorsNotRunning +
- " executors not scheduled");
+ cluster.setStatus(topId, "Not fully scheduled (No free slots in default pool) "
+ + executorsNotRunning
+ + " executors not scheduled");
} else {
if (slotsUsed < slotsRequested) {
- _cluster.setStatus(topId, "Running with fewer slots than requested (" + slotsUsed + "/" + origRequest + ")");
+ cluster.setStatus(topId, "Running with fewer slots than requested ("
+ + slotsUsed + "/"
+ + origRequest + ")");
} else { //slotsUsed < origRequest
- _cluster.setStatus(topId,
- "Fully Scheduled (requested " + origRequest + " slots, but could only use " + slotsUsed +
- ")");
+ cluster.setStatus(topId,
+ "Fully Scheduled (requested " + origRequest
+ + " slots, but could only use " + slotsUsed + ")");
}
}
continue;
@@ -160,28 +165,28 @@
int slotsNeeded = slotsToUse - slotsFree;
if (slotsNeeded > 0) {
- _nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
+ nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
}
if (executorsNotRunning <= 0) {
//There are free slots that we can take advantage of now.
- for (Node n : _nodes) {
- n.freeTopology(topId, _cluster);
+ for (Node n : nodes) {
+ n.freeTopology(topId, cluster);
}
- slotsFree = Node.countFreeSlotsAlive(_nodes);
+ slotsFree = Node.countFreeSlotsAlive(nodes);
slotsToUse = Math.min(slotsRequested, slotsFree);
}
RoundRobinSlotScheduler slotSched =
- new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
+ new RoundRobinSlotScheduler(td, slotsToUse, cluster);
- LinkedList<Node> nodes = new LinkedList<>(_nodes);
+ LinkedList<Node> nodes = new LinkedList<>(this.nodes);
while (true) {
Node n;
do {
if (nodes.isEmpty()) {
- throw new IllegalStateException("This should not happen, we" +
- " messed up and did not get enough slots");
+ throw new IllegalStateException("This should not happen, we"
+ + " messed up and did not get enough slots");
}
n = nodes.peekFirst();
if (n.totalSlotsFree() == 0) {
@@ -193,24 +198,28 @@
break;
}
}
- int afterSchedSlotsUsed = Node.countSlotsUsed(topId, _nodes);
+ int afterSchedSlotsUsed = Node.countSlotsUsed(topId, this.nodes);
if (afterSchedSlotsUsed < slotsRequested) {
- _cluster.setStatus(topId, "Running with fewer slots than requested (" + afterSchedSlotsUsed + "/" + origRequest + ")");
+ cluster.setStatus(topId, "Running with fewer slots than requested ("
+ + afterSchedSlotsUsed + "/" + origRequest + ")");
} else if (afterSchedSlotsUsed < origRequest) {
- _cluster.setStatus(topId,
- "Fully Scheduled (requested " + origRequest + " slots, but could only use " + afterSchedSlotsUsed +
- ")");
+ cluster.setStatus(topId,
+ "Fully Scheduled (requested "
+ + origRequest
+ + " slots, but could only use "
+ + afterSchedSlotsUsed
+ + ")");
} else {
- _cluster.setStatus(topId, "Fully Scheduled");
+ cluster.setStatus(topId, "Fully Scheduled");
}
} else {
- _cluster.setStatus(topId, "Fully Scheduled");
+ cluster.setStatus(topId, "Fully Scheduled");
}
}
}
@Override
public String toString() {
- return "DefaultPool " + _nodes.size() + " nodes " + _tds.size() + " topologies";
+ return "DefaultPool " + nodes.size() + " nodes " + tds.size() + " topologies";
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
index e868223..867baa4 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
@@ -23,23 +23,23 @@
import org.slf4j.LoggerFactory;
/**
- * All of the machines that currently have nothing assigned to them
+ * All of the machines that currently have nothing assigned to them.
*/
public class FreePool extends NodePool {
private static final Logger LOG = LoggerFactory.getLogger(FreePool.class);
- private Set<Node> _nodes = new HashSet<>();
- private int _totalSlots = 0;
+ private Set<Node> nodes = new HashSet<>();
+ private int totalSlots = 0;
@Override
public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
super.init(cluster, nodeIdToNode);
for (Node n : nodeIdToNode.values()) {
if (n.isTotallyFree() && n.isAlive()) {
- _nodes.add(n);
- _totalSlots += n.totalSlotsFree();
+ nodes.add(n);
+ totalSlots += n.totalSlotsFree();
}
}
- LOG.debug("Found {} nodes with {} slots", _nodes.size(), _totalSlots);
+ LOG.debug("Found {} nodes with {} slots", nodes.size(), totalSlots);
}
@Override
@@ -56,11 +56,11 @@
@Override
public Collection<Node> takeNodes(int nodesNeeded) {
HashSet<Node> ret = new HashSet<>();
- Iterator<Node> it = _nodes.iterator();
+ Iterator<Node> it = nodes.iterator();
while (it.hasNext() && nodesNeeded > ret.size()) {
Node n = it.next();
ret.add(n);
- _totalSlots -= n.totalSlotsFree();
+ totalSlots -= n.totalSlotsFree();
it.remove();
}
return ret;
@@ -68,22 +68,22 @@
@Override
public int nodesAvailable() {
- return _nodes.size();
+ return nodes.size();
}
@Override
public int slotsAvailable() {
- return _totalSlots;
+ return totalSlots;
}
@Override
public Collection<Node> takeNodesBySlots(int slotsNeeded) {
HashSet<Node> ret = new HashSet<>();
- Iterator<Node> it = _nodes.iterator();
+ Iterator<Node> it = nodes.iterator();
while (it.hasNext() && slotsNeeded > 0) {
Node n = it.next();
ret.add(n);
- _totalSlots -= n.totalSlotsFree();
+ totalSlots -= n.totalSlotsFree();
slotsNeeded -= n.totalSlotsFree();
it.remove();
}
@@ -94,7 +94,7 @@
public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
int slotsFound = 0;
int nodesFound = 0;
- Iterator<Node> it = _nodes.iterator();
+ Iterator<Node> it = nodes.iterator();
while (it.hasNext() && slotsNeeded > 0) {
Node n = it.next();
nodesFound++;
@@ -112,6 +112,6 @@
@Override
public String toString() {
- return "FreePool of " + _nodes.size() + " nodes with " + _totalSlots + " slots";
+ return "FreePool of " + nodes.size() + " nodes with " + totalSlots + " slots";
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
index 19ce712..77f9a3d 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
@@ -27,38 +27,38 @@
import org.slf4j.LoggerFactory;
/**
- * A pool of machines that can be used to run isolated topologies
+ * A pool of machines that can be used to run isolated topologies.
*/
public class IsolatedPool extends NodePool {
private static final Logger LOG = LoggerFactory.getLogger(IsolatedPool.class);
- private Map<String, Set<Node>> _topologyIdToNodes = new HashMap<>();
- private HashMap<String, TopologyDetails> _tds = new HashMap<>();
- private HashSet<String> _isolated = new HashSet<>();
- private int _maxNodes;
- private int _usedNodes;
+ private Map<String, Set<Node>> topologyIdToNodes = new HashMap<>();
+ private HashMap<String, TopologyDetails> tds = new HashMap<>();
+ private HashSet<String> isolated = new HashSet<>();
+ private int maxNodes;
+ private int usedNodes;
public IsolatedPool(int maxNodes) {
- _maxNodes = maxNodes;
- _usedNodes = 0;
+ this.maxNodes = maxNodes;
+ usedNodes = 0;
}
@Override
public void addTopology(TopologyDetails td) {
String topId = td.getId();
LOG.debug("Adding in Topology {}", topId);
- SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+ SchedulerAssignment assignment = cluster.getAssignmentById(topId);
Set<Node> assignedNodes = new HashSet<>();
if (assignment != null) {
for (WorkerSlot ws : assignment.getSlots()) {
- Node n = _nodeIdToNode.get(ws.getNodeId());
+ Node n = nodeIdToNode.get(ws.getNodeId());
assignedNodes.add(n);
}
}
- _usedNodes += assignedNodes.size();
- _topologyIdToNodes.put(topId, assignedNodes);
- _tds.put(topId, td);
+ usedNodes += assignedNodes.size();
+ topologyIdToNodes.put(topId, assignedNodes);
+ tds.put(topId, td);
if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
- _isolated.add(topId);
+ isolated.add(topId);
}
}
@@ -66,10 +66,10 @@
public boolean canAdd(TopologyDetails td) {
//Only add topologies that are not sharing nodes with other topologies
String topId = td.getId();
- SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+ SchedulerAssignment assignment = cluster.getAssignmentById(topId);
if (assignment != null) {
for (WorkerSlot ws : assignment.getSlots()) {
- Node n = _nodeIdToNode.get(ws.getNodeId());
+ Node n = nodeIdToNode.get(ws.getNodeId());
if (n.getRunningTopologies().size() > 1) {
return false;
}
@@ -80,18 +80,17 @@
@Override
public void scheduleAsNeeded(NodePool... lesserPools) {
- for (String topId : _topologyIdToNodes.keySet()) {
- TopologyDetails td = _tds.get(topId);
- Set<Node> allNodes = _topologyIdToNodes.get(topId);
+ for (String topId : topologyIdToNodes.keySet()) {
+ TopologyDetails td = tds.get(topId);
+ Set<Node> allNodes = topologyIdToNodes.get(topId);
Number nodesRequested = (Number) td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES);
Integer effectiveNodesRequested = null;
if (nodesRequested != null) {
effectiveNodesRequested = Math.min(td.getExecutors().size(),
+nodesRequested.intValue());
}
- if (_cluster.needsScheduling(td) ||
- (effectiveNodesRequested != null &&
- allNodes.size() != effectiveNodesRequested)) {
+ if (cluster.needsScheduling(td)
+ || (effectiveNodesRequested != null && allNodes.size() != effectiveNodesRequested)) {
LOG.debug("Scheduling topology {}", topId);
int slotsToUse = 0;
if (effectiveNodesRequested == null) {
@@ -106,7 +105,7 @@
}
RoundRobinSlotScheduler slotSched =
- new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
+ new RoundRobinSlotScheduler(td, slotsToUse, cluster);
LOG.debug("Nodes sorted by free space {}", allNodes);
while (true) {
@@ -120,9 +119,9 @@
}
}
}
- Set<Node> found = _topologyIdToNodes.get(topId);
+ Set<Node> found = topologyIdToNodes.get(topId);
int nc = found == null ? 0 : found.size();
- _cluster.setStatus(topId, "Scheduled Isolated on " + nc + " Nodes");
+ cluster.setStatus(topId, "Scheduled Isolated on " + nc + " Nodes");
}
}
@@ -152,7 +151,7 @@
* Get the nodes needed to schedule an isolated topology.
* @param td the topology to be scheduled
* @param allNodes the nodes already scheduled for this topology.
- * This will be updated to include new nodes if needed.
+ * This will be updated to include new nodes if needed.
* @param lesserPools node pools we can steal nodes from
* @return the number of additional slots that should be used for scheduling.
*/
@@ -163,39 +162,45 @@
int nodesFromUsAvailable = nodesAvailable();
int nodesFromOthersAvailable = NodePool.nodesAvailable(lesserPools);
- int nodesUsed = _topologyIdToNodes.get(topId).size();
+ int nodesUsed = topologyIdToNodes.get(topId).size();
int nodesNeeded = nodesRequested - nodesUsed;
- LOG.debug("Nodes... requested {} used {} available from us {} " +
- "avail from other {} needed {}", nodesRequested,
- nodesUsed, nodesFromUsAvailable, nodesFromOthersAvailable,
- nodesNeeded);
- if ((nodesNeeded - nodesFromUsAvailable) > (_maxNodes - _usedNodes)) {
- _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. "
- + ((nodesNeeded - nodesFromUsAvailable) - (_maxNodes - _usedNodes))
- + " more nodes needed to run topology.");
+ LOG.debug("Nodes... requested {} used {} available from us {} "
+ + "avail from other {} needed {}",
+ nodesRequested,
+ nodesUsed,
+ nodesFromUsAvailable,
+ nodesFromOthersAvailable,
+ nodesNeeded);
+ if ((nodesNeeded - nodesFromUsAvailable) > (maxNodes - usedNodes)) {
+ cluster.setStatus(topId,
+ "Max Nodes("
+ + maxNodes
+ + ") for this user would be exceeded. "
+ + ((nodesNeeded - nodesFromUsAvailable) - (maxNodes - usedNodes))
+ + " more nodes needed to run topology.");
return 0;
}
- //In order to avoid going over _maxNodes I may need to steal from
+ //In order to avoid going over maxNodes I may need to steal from
// myself even though other pools have free nodes. so figure out how
// much each group should provide
- int nodesNeededFromOthers = Math.min(Math.min(_maxNodes - _usedNodes,
+ int nodesNeededFromOthers = Math.min(Math.min(maxNodes - usedNodes,
nodesFromOthersAvailable), nodesNeeded);
int nodesNeededFromUs = nodesNeeded - nodesNeededFromOthers;
LOG.debug("Nodes... needed from us {} needed from others {}",
nodesNeededFromUs, nodesNeededFromOthers);
if (nodesNeededFromUs > nodesFromUsAvailable) {
- _cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
+ cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
return 0;
}
//Get the nodes
Collection<Node> found = NodePool.takeNodes(nodesNeededFromOthers, lesserPools);
- _usedNodes += found.size();
+ usedNodes += found.size();
allNodes.addAll(found);
Collection<Node> foundMore = takeNodes(nodesNeededFromUs);
- _usedNodes += foundMore.size();
+ usedNodes += foundMore.size();
allNodes.addAll(foundMore);
int totalTasks = td.getExecutors().size();
@@ -207,13 +212,20 @@
if (slotsToUse <= 0) {
// if # of workers requested is more than we currently have
if (origRequest > slotsUsed) {
- _cluster.setStatus(topId, "Running with fewer slots than requested " + slotsUsed + "/" +
- origRequest + " on " + allNodes.size() + " node(s) with " + (slotsUsed + slotsFree) +
- " total slots");
+ cluster.setStatus(topId,
+ "Running with fewer slots than requested "
+ + slotsUsed
+ + "/"
+ + origRequest
+ + " on "
+ + allNodes.size()
+ + " node(s) with "
+ + (slotsUsed + slotsFree)
+ + " total slots");
} else {
// if # of workers requested is less than we took
// then we know some workers we track died, since we have more workers than we are supposed to have
- _cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
+ cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
}
}
return slotsToUse;
@@ -223,7 +235,7 @@
* Get the nodes needed to schedule a non-isolated topology.
* @param td the topology to be scheduled
* @param allNodes the nodes already scheduled for this topology.
- * This will be updated to include new nodes if needed.
+ * This will be updated to include new nodes if needed.
* @param lesserPools node pools we can steal nodes from
* @return the number of additional slots that should be used for scheduling.
*/
@@ -245,21 +257,25 @@
LOG.debug("Slots... requested {} used {} free {} available {} to be used {}",
slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse);
if (slotsToUse <= 0) {
- _cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
+ cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
return 0;
}
int slotsNeeded = slotsToUse - slotsFree;
int numNewNodes = NodePool.getNodeCountIfSlotsWereTaken(slotsNeeded, lesserPools);
LOG.debug("Nodes... new {} used {} max {}",
- numNewNodes, _usedNodes, _maxNodes);
- if ((numNewNodes + _usedNodes) > _maxNodes) {
- _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. " +
- (numNewNodes - (_maxNodes - _usedNodes)) + " more nodes needed to run topology.");
+ numNewNodes, usedNodes, maxNodes);
+ if ((numNewNodes + usedNodes) > maxNodes) {
+ cluster.setStatus(topId,
+ "Max Nodes("
+ + maxNodes
+ + ") for this user would be exceeded. "
+ + (numNewNodes - (maxNodes - usedNodes))
+ + " more nodes needed to run topology.");
return 0;
}
Collection<Node> found = NodePool.takeNodesBySlot(slotsNeeded, lesserPools);
- _usedNodes += found.size();
+ usedNodes += found.size();
allNodes.addAll(found);
return slotsToUse;
}
@@ -268,8 +284,8 @@
public Collection<Node> takeNodes(int nodesNeeded) {
LOG.debug("Taking {} from {}", nodesNeeded, this);
HashSet<Node> ret = new HashSet<>();
- for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
- if (!_isolated.contains(entry.getKey())) {
+ for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+ if (!isolated.contains(entry.getKey())) {
Iterator<Node> it = entry.getValue().iterator();
while (it.hasNext()) {
if (nodesNeeded <= 0) {
@@ -277,10 +293,10 @@
}
Node n = it.next();
it.remove();
- n.freeAllSlots(_cluster);
+ n.freeAllSlots(cluster);
ret.add(n);
nodesNeeded--;
- _usedNodes--;
+ usedNodes--;
}
}
}
@@ -290,8 +306,8 @@
@Override
public int nodesAvailable() {
int total = 0;
- for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
- if (!_isolated.contains(entry.getKey())) {
+ for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+ if (!isolated.contains(entry.getKey())) {
total += entry.getValue().size();
}
}
@@ -301,8 +317,8 @@
@Override
public int slotsAvailable() {
int total = 0;
- for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
- if (!_isolated.contains(entry.getKey())) {
+ for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+ if (!isolated.contains(entry.getKey())) {
total += Node.countTotalSlotsAlive(entry.getValue());
}
}
@@ -312,15 +328,15 @@
@Override
public Collection<Node> takeNodesBySlots(int slotsNeeded) {
HashSet<Node> ret = new HashSet<>();
- for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
- if (!_isolated.contains(entry.getKey())) {
+ for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+ if (!isolated.contains(entry.getKey())) {
Iterator<Node> it = entry.getValue().iterator();
while (it.hasNext()) {
Node n = it.next();
if (n.isAlive()) {
it.remove();
- _usedNodes--;
- n.freeAllSlots(_cluster);
+ usedNodes--;
+ n.freeAllSlots(cluster);
ret.add(n);
slotsNeeded -= n.totalSlots();
if (slotsNeeded <= 0) {
@@ -337,8 +353,8 @@
public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
int nodesFound = 0;
int slotsFound = 0;
- for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
- if (!_isolated.contains(entry.getKey())) {
+ for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+ if (!isolated.contains(entry.getKey())) {
for (Node n : entry.getValue()) {
if (n.isAlive()) {
nodesFound++;
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
index 89fe462..0c2ba56 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
@@ -42,17 +42,17 @@
}
};
private static final Logger LOG = LoggerFactory.getLogger(Node.class);
- private final String _nodeId;
- private Map<String, Set<WorkerSlot>> _topIdToUsedSlots = new HashMap<>();
- private Set<WorkerSlot> _freeSlots = new HashSet<>();
- private boolean _isAlive;
+ private final String nodeId;
+ private Map<String, Set<WorkerSlot>> topIdToUsedSlots = new HashMap<>();
+ private Set<WorkerSlot> freeSlots = new HashSet<>();
+ private boolean isAlive;
public Node(String nodeId, Set<Integer> allPorts, boolean isAlive) {
- _nodeId = nodeId;
- _isAlive = isAlive;
- if (_isAlive && allPorts != null) {
+ this.nodeId = nodeId;
+ this.isAlive = isAlive;
+ if (this.isAlive && allPorts != null) {
for (int port : allPorts) {
- _freeSlots.add(new WorkerSlot(_nodeId, port));
+ freeSlots.add(new WorkerSlot(this.nodeId, port));
}
}
}
@@ -119,8 +119,11 @@
node.addOrphanedSlot(ws);
}
if (node.assignInternal(ws, topId, true)) {
- LOG.warn("Bad scheduling state for topology [" + topId + "], the slot " +
- ws + " assigned to multiple workers, un-assigning everything...");
+ LOG.warn("Bad scheduling state for topology ["
+ + topId
+ + "], the slot "
+ + ws
+ + " assigned to multiple workers, un-assigning everything...");
node.free(ws, cluster, true);
}
}
@@ -130,95 +133,110 @@
}
public String getId() {
- return _nodeId;
+ return nodeId;
}
public boolean isAlive() {
- return _isAlive;
+ return isAlive;
}
/**
+ * Get running topologies.
* @return a collection of the topology ids currently running on this node
*/
public Collection<String> getRunningTopologies() {
- return _topIdToUsedSlots.keySet();
+ return topIdToUsedSlots.keySet();
}
public boolean isTotallyFree() {
- return _topIdToUsedSlots.isEmpty();
+ return topIdToUsedSlots.isEmpty();
}
public int totalSlotsFree() {
- return _freeSlots.size();
+ return freeSlots.size();
}
public int totalSlotsUsed() {
int total = 0;
- for (Set<WorkerSlot> slots : _topIdToUsedSlots.values()) {
+ for (Set<WorkerSlot> slots : topIdToUsedSlots.values()) {
total += slots.size();
}
return total;
}
+ public int totalSlotsUsed(String topId) {
+ int total = 0;
+ Set<WorkerSlot> slots = topIdToUsedSlots.get(topId);
+ if (slots != null) {
+ total = slots.size();
+ }
+ return total;
+ }
+
public int totalSlots() {
return totalSlotsFree() + totalSlotsUsed();
}
- public int totalSlotsUsed(String topId) {
- int total = 0;
- Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
- if (slots != null) {
- total = slots.size();
- }
- return total;
- }
-
private void validateSlot(WorkerSlot ws) {
- if (!_nodeId.equals(ws.getNodeId())) {
- throw new IllegalArgumentException(
- "Trying to add a slot to the wrong node " + ws +
- " is not a part of " + _nodeId);
+ if (!nodeId.equals(ws.getNodeId())) {
+ throw new IllegalArgumentException("Trying to add a slot to the wrong node "
+ + ws
+ + " is not a part of "
+ + nodeId);
}
}
private void addOrphanedSlot(WorkerSlot ws) {
- if (_isAlive) {
- throw new IllegalArgumentException("Orphaned Slots " +
- "only are allowed on dead nodes.");
+ if (isAlive) {
+ throw new IllegalArgumentException("Orphaned Slots only are allowed on dead nodes.");
}
validateSlot(ws);
- if (_freeSlots.contains(ws)) {
+ if (freeSlots.contains(ws)) {
return;
}
- for (Set<WorkerSlot> used : _topIdToUsedSlots.values()) {
+ for (Set<WorkerSlot> used : topIdToUsedSlots.values()) {
if (used.contains(ws)) {
return;
}
}
- _freeSlots.add(ws);
+ freeSlots.add(ws);
}
boolean assignInternal(WorkerSlot ws, String topId, boolean dontThrow) {
validateSlot(ws);
- if (!_freeSlots.remove(ws)) {
- for (Entry<String, Set<WorkerSlot>> topologySetEntry : _topIdToUsedSlots.entrySet()) {
+ if (!freeSlots.remove(ws)) {
+ for (Entry<String, Set<WorkerSlot>> topologySetEntry : topIdToUsedSlots.entrySet()) {
if (topologySetEntry.getValue().contains(ws)) {
if (dontThrow) {
- LOG.warn("Worker slot [" + ws + "] can't be assigned to " + topId +
- ". Its already assigned to " + topologySetEntry.getKey() + ".");
+ LOG.warn("Worker slot ["
+ + ws
+ + "] can't be assigned to "
+ + topId
+ + ". Its already assigned to "
+ + topologySetEntry.getKey()
+ + ".");
return true;
}
- throw new IllegalStateException("Worker slot [" + ws + "] can't be assigned to "
- + topId + ". Its already assigned to " + topologySetEntry.getKey() + ".");
+ throw new IllegalStateException("Worker slot ["
+ + ws
+ + "] can't be assigned to "
+ + topId
+ + ". Its already assigned to "
+ + topologySetEntry.getKey()
+ + ".");
}
}
- LOG.warn("Adding Worker slot [" + ws + "] that was not reported in the supervisor heartbeats," +
- " but the worker is already running for topology " + topId + ".");
+ LOG.warn("Adding Worker slot ["
+ + ws
+ + "] that was not reported in the supervisor heartbeats,"
+ + " but the worker is already running for topology "
+ + topId
+ + ".");
}
- Set<WorkerSlot> usedSlots = _topIdToUsedSlots.get(topId);
+ Set<WorkerSlot> usedSlots = topIdToUsedSlots.get(topId);
if (usedSlots == null) {
usedSlots = new HashSet<>();
- _topIdToUsedSlots.put(topId, usedSlots);
+ topIdToUsedSlots.put(topId, usedSlots);
}
usedSlots.add(ws);
return false;
@@ -229,32 +247,34 @@
* @param cluster the cluster to be updated
*/
public void freeAllSlots(Cluster cluster) {
- if (!_isAlive) {
- LOG.warn("Freeing all slots on a dead node {} ", _nodeId);
+ if (!isAlive) {
+ LOG.warn("Freeing all slots on a dead node {} ", nodeId);
}
- for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+ for (Entry<String, Set<WorkerSlot>> entry : topIdToUsedSlots.entrySet()) {
cluster.freeSlots(entry.getValue());
- if (_isAlive) {
- _freeSlots.addAll(entry.getValue());
+ if (isAlive) {
+ freeSlots.addAll(entry.getValue());
}
}
- _topIdToUsedSlots = new HashMap<>();
+ topIdToUsedSlots = new HashMap<>();
}
/**
- * Frees a single slot in this node
+ * Frees a single slot in this node.
* @param ws the slot to free
* @param cluster the cluster to update
*/
public void free(WorkerSlot ws, Cluster cluster, boolean forceFree) {
- if (_freeSlots.contains(ws)) return;
+ if (freeSlots.contains(ws)) {
+ return;
+ }
boolean wasFound = false;
- for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+ for (Entry<String, Set<WorkerSlot>> entry : topIdToUsedSlots.entrySet()) {
Set<WorkerSlot> slots = entry.getValue();
if (slots.remove(ws)) {
cluster.freeSlot(ws);
- if (_isAlive) {
- _freeSlots.add(ws);
+ if (isAlive) {
+ freeSlots.add(ws);
}
wasFound = true;
}
@@ -263,10 +283,11 @@
if (forceFree) {
LOG.info("Forcefully freeing the " + ws);
cluster.freeSlot(ws);
- _freeSlots.add(ws);
+ freeSlots.add(ws);
} else {
- throw new IllegalArgumentException("Tried to free a slot that was not" +
- " part of this node " + _nodeId);
+ throw new IllegalArgumentException("Tried to free a slot that was not"
+ + " part of this node "
+ + nodeId);
}
}
}
@@ -277,15 +298,17 @@
* @param cluster the cluster to update
*/
public void freeTopology(String topId, Cluster cluster) {
- Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
- if (slots == null || slots.isEmpty()) return;
+ Set<WorkerSlot> slots = topIdToUsedSlots.get(topId);
+ if (slots == null || slots.isEmpty()) {
+ return;
+ }
for (WorkerSlot ws : slots) {
cluster.freeSlot(ws);
- if (_isAlive) {
- _freeSlots.add(ws);
+ if (isAlive) {
+ freeSlots.add(ws);
}
}
- _topIdToUsedSlots.remove(topId);
+ topIdToUsedSlots.remove(topId);
}
/**
@@ -297,16 +320,16 @@
*/
public void assign(String topId, Collection<ExecutorDetails> executors,
Cluster cluster) {
- if (!_isAlive) {
- throw new IllegalStateException("Trying to adding to a dead node " + _nodeId);
+ if (!isAlive) {
+ throw new IllegalStateException("Trying to adding to a dead node " + nodeId);
}
- if (_freeSlots.isEmpty()) {
- throw new IllegalStateException("Trying to assign to a full node " + _nodeId);
+ if (freeSlots.isEmpty()) {
+ throw new IllegalStateException("Trying to assign to a full node " + nodeId);
}
if (executors.size() == 0) {
- LOG.warn("Trying to assign nothing from " + topId + " to " + _nodeId + " (Ignored)");
+ LOG.warn("Trying to assign nothing from " + topId + " to " + nodeId + " (Ignored)");
} else {
- WorkerSlot slot = _freeSlots.iterator().next();
+ WorkerSlot slot = freeSlots.iterator().next();
cluster.assign(slot, topId, executors);
assignInternal(slot, topId, false);
}
@@ -314,16 +337,16 @@
@Override
public boolean equals(Object other) {
- return other instanceof Node && _nodeId.equals(((Node) other)._nodeId);
+ return other instanceof Node && nodeId.equals(((Node) other).nodeId);
}
@Override
public int hashCode() {
- return _nodeId.hashCode();
+ return nodeId.hashCode();
}
@Override
public String toString() {
- return "Node: " + _nodeId;
+ return "Node: " + nodeId;
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
index 21ffbf5..477f243 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
@@ -35,8 +35,8 @@
*/
public abstract class NodePool {
private static final Logger LOG = LoggerFactory.getLogger(NodePool.class);
- protected Cluster _cluster;
- protected Map<String, Node> _nodeIdToNode;
+ protected Cluster cluster;
+ protected Map<String, Node> nodeIdToNode;
public static int slotsAvailable(NodePool[] pools) {
int slotsAvailable = 0;
@@ -46,6 +46,12 @@
return slotsAvailable;
}
+ /**
+ * Get number of available slots.
+ * @return the number of slots that are available to be taken
+ */
+ public abstract int slotsAvailable();
+
public static int nodesAvailable(NodePool[] pools) {
int nodesAvailable = 0;
for (NodePool pool : pools) {
@@ -54,6 +60,12 @@
return nodesAvailable;
}
+ /**
+ * Get the number of available nodes.
+ * @return the number of nodes that are available to be taken
+ */
+ public abstract int nodesAvailable();
+
public static Collection<Node> takeNodesBySlot(int slotsNeeded, NodePool[] pools) {
LOG.debug("Trying to grab {} free slots from {}", slotsNeeded, pools);
HashSet<Node> ret = new HashSet<>();
@@ -89,8 +101,8 @@
int total = 0;
for (NodePool pool : pools) {
NodeAndSlotCounts ns = pool.getNodeAndSlotCountIfSlotsWereTaken(slots);
- total += ns._nodes;
- slots -= ns._slots;
+ total += ns.nodes;
+ slots -= ns.slots;
LOG.debug("Found {} nodes so far {} more slots needed", total, slots);
if (slots <= 0) {
break;
@@ -105,55 +117,46 @@
* @param nodeIdToNode the mapping of node id to nodes
*/
public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
- _cluster = cluster;
- _nodeIdToNode = nodeIdToNode;
+ this.cluster = cluster;
+ this.nodeIdToNode = nodeIdToNode;
}
/**
- * Add a topology to the pool
- * @param td the topology to add.
+ * Add a topology to the pool.
+ * @param td the topology to add
*/
public abstract void addTopology(TopologyDetails td);
/**
- * Check if this topology can be added to this pool
+ * Check if this topology can be added to this pool.
* @param td the topology
* @return true if it can else false
*/
public abstract boolean canAdd(TopologyDetails td);
/**
- * @return the number of nodes that are available to be taken
- */
- public abstract int slotsAvailable();
-
- /**
- * Take nodes from this pool that can fulfill possibly up to the
- * slotsNeeded
+ * Take nodes from this pool that can fulfill possibly up to the slotsNeeded.
* @param slotsNeeded the number of slots that are needed.
* @return a Collection of nodes with the removed nodes in it.
- * This may be empty, but should not be null.
+ * This may be empty, but should not be null.
*/
public abstract Collection<Node> takeNodesBySlots(int slotsNeeded);
/**
- * Get the number of nodes and slots this would provide to get the slots needed
+ * Get the number of nodes and slots this would provide to get the slots needed.
* @param slots the number of slots needed
* @return the number of nodes and slots that would be returned.
*/
public abstract NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slots);
/**
- * @return the number of nodes that are available to be taken
- */
- public abstract int nodesAvailable();
-
- /**
- * Take up to nodesNeeded from this pool
+ * Take up to nodesNeeded from this pool.
* @param nodesNeeded the number of nodes that are needed.
* @return a Collection of nodes with the removed nodes in it.
- * This may be empty, but should not be null.
+ * This may be empty, but should not be null.
*/
+ @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
+ //simply suppress until https://github.com/checkstyle/checkstyle/issues/3770 is resolved
public abstract Collection<Node> takeNodes(int nodesNeeded);
/**
@@ -163,12 +166,12 @@
public abstract void scheduleAsNeeded(NodePool... lesserPools);
public static class NodeAndSlotCounts {
- public final int _nodes;
- public final int _slots;
+ public final int nodes;
+ public final int slots;
public NodeAndSlotCounts(int nodes, int slots) {
- _nodes = nodes;
- _slots = slots;
+ this.nodes = nodes;
+ this.slots = slots;
}
}
@@ -177,74 +180,74 @@
* component spreading among different hosts.
*/
public static class RoundRobinSlotScheduler {
- private Map<String, Set<String>> _nodeToComps;
- private HashMap<String, List<ExecutorDetails>> _spreadToSchedule;
- private LinkedList<Set<ExecutorDetails>> _slots;
- private Set<ExecutorDetails> _lastSlot;
- private Cluster _cluster;
- private String _topId;
+ private Map<String, Set<String>> nodeToComps;
+ private HashMap<String, List<ExecutorDetails>> spreadToSchedule;
+ private LinkedList<Set<ExecutorDetails>> slots;
+ private Set<ExecutorDetails> lastSlot;
+ private Cluster cluster;
+ private String topId;
/**
- * Create a new scheduler for a given topology
+ * Create a new scheduler for a given topology.
* @param td the topology to schedule
* @param slotsToUse the number of slots to use for the executors left to
- * schedule.
+ * schedule.
* @param cluster the cluster to schedule this on.
*/
public RoundRobinSlotScheduler(TopologyDetails td, int slotsToUse,
Cluster cluster) {
- _topId = td.getId();
- _cluster = cluster;
+ topId = td.getId();
+ this.cluster = cluster;
Map<ExecutorDetails, String> execToComp = td.getExecutorToComponent();
- SchedulerAssignment assignment = _cluster.getAssignmentById(_topId);
- _nodeToComps = new HashMap<>();
+ SchedulerAssignment assignment = this.cluster.getAssignmentById(topId);
+ nodeToComps = new HashMap<>();
if (assignment != null) {
Map<ExecutorDetails, WorkerSlot> execToSlot = assignment.getExecutorToSlot();
for (Entry<ExecutorDetails, WorkerSlot> entry : execToSlot.entrySet()) {
String nodeId = entry.getValue().getNodeId();
- Set<String> comps = _nodeToComps.get(nodeId);
+ Set<String> comps = nodeToComps.get(nodeId);
if (comps == null) {
comps = new HashSet<>();
- _nodeToComps.put(nodeId, comps);
+ nodeToComps.put(nodeId, comps);
}
comps.add(execToComp.get(entry.getKey()));
}
}
- _spreadToSchedule = new HashMap<>();
+ spreadToSchedule = new HashMap<>();
List<String> spreadComps = (List<String>) td.getConf().get(Config.TOPOLOGY_SPREAD_COMPONENTS);
if (spreadComps != null) {
for (String comp : spreadComps) {
- _spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
+ spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
}
}
- _slots = new LinkedList<>();
+ slots = new LinkedList<>();
for (int i = 0; i < slotsToUse; i++) {
- _slots.add(new HashSet<ExecutorDetails>());
+ slots.add(new HashSet<ExecutorDetails>());
}
int at = 0;
- for (Entry<String, List<ExecutorDetails>> entry : _cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
+ for (Entry<String, List<ExecutorDetails>> entry : this.cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
LOG.debug("Scheduling for {}", entry.getKey());
- if (_spreadToSchedule.containsKey(entry.getKey())) {
+ if (spreadToSchedule.containsKey(entry.getKey())) {
LOG.debug("Saving {} for spread...", entry.getKey());
- _spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
+ spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
} else {
for (ExecutorDetails ed : entry.getValue()) {
LOG.debug("Assigning {} {} to slot {}", entry.getKey(), ed, at);
- _slots.get(at).add(ed);
+ slots.get(at).add(ed);
at++;
- if (at >= _slots.size()) {
+ if (at >= slots.size()) {
at = 0;
}
}
}
}
- _lastSlot = _slots.get(_slots.size() - 1);
+ lastSlot = slots.get(slots.size() - 1);
}
/**
@@ -253,25 +256,25 @@
* @return true if there are more slots to assign else false.
*/
public boolean assignSlotTo(Node n) {
- if (_slots.isEmpty()) {
+ if (slots.isEmpty()) {
return false;
}
- Set<ExecutorDetails> slot = _slots.pop();
- if (slot == _lastSlot) {
+ Set<ExecutorDetails> slot = slots.pop();
+ if (slot == lastSlot) {
//The last slot fill it up
- for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+ for (Entry<String, List<ExecutorDetails>> entry : spreadToSchedule.entrySet()) {
if (entry.getValue().size() > 0) {
slot.addAll(entry.getValue());
}
}
} else {
String nodeId = n.getId();
- Set<String> nodeComps = _nodeToComps.get(nodeId);
+ Set<String> nodeComps = nodeToComps.get(nodeId);
if (nodeComps == null) {
nodeComps = new HashSet<>();
- _nodeToComps.put(nodeId, nodeComps);
+ nodeToComps.put(nodeId, nodeComps);
}
- for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+ for (Entry<String, List<ExecutorDetails>> entry : spreadToSchedule.entrySet()) {
if (entry.getValue().size() > 0) {
String comp = entry.getKey();
if (!nodeComps.contains(comp)) {
@@ -281,8 +284,8 @@
}
}
}
- n.assign(_topId, slot, _cluster);
- return !_slots.isEmpty();
+ n.assign(topId, slot, cluster);
+ return !slots.isEmpty();
}
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
similarity index 98%
rename from storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java
rename to storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
index ddea221..9d12428 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
@@ -39,8 +39,8 @@
/**
* Represents a single node in the cluster.
*/
-public class RAS_Node {
- private static final Logger LOG = LoggerFactory.getLogger(RAS_Node.class);
+public class RasNode {
+ private static final Logger LOG = LoggerFactory.getLogger(RasNode.class);
private final String nodeId;
private final Cluster cluster;
private final Set<WorkerSlot> originallyFreeSlots;
@@ -63,7 +63,7 @@
* @param workerIdToWorker the mapping of slots already assigned to this node.
* @param assignmentMap the mapping of executors already assigned to this node.
*/
- public RAS_Node(
+ public RasNode(
String nodeId,
SupervisorDetails sup,
Cluster cluster,
@@ -391,8 +391,8 @@
@Override
public boolean equals(Object other) {
- if (other instanceof RAS_Node) {
- return nodeId.equals(((RAS_Node) other).nodeId);
+ if (other instanceof RasNode) {
+ return nodeId.equals(((RasNode) other).nodeId);
}
return false;
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
similarity index 89%
rename from storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java
rename to storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
index 584934b..9ddbcf4b 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
@@ -30,19 +30,19 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class RAS_Nodes {
+public class RasNodes {
- private static final Logger LOG = LoggerFactory.getLogger(RAS_Nodes.class);
- private Map<String, RAS_Node> nodeMap;
+ private static final Logger LOG = LoggerFactory.getLogger(RasNodes.class);
+ private Map<String, RasNode> nodeMap;
- public RAS_Nodes(Cluster cluster) {
+ public RasNodes(Cluster cluster) {
this.nodeMap = getAllNodesFrom(cluster);
}
- public static Map<String, RAS_Node> getAllNodesFrom(Cluster cluster) {
+ public static Map<String, RasNode> getAllNodesFrom(Cluster cluster) {
//A map of node ids to node objects
- Map<String, RAS_Node> nodeIdToNode = new HashMap<>();
+ Map<String, RasNode> nodeIdToNode = new HashMap<>();
//A map of assignments organized by node with the following format:
//{nodeId -> {topologyId -> {workerId -> {execs}}}}
Map<String, Map<String, Map<String, Collection<ExecutorDetails>>>> assignmentRelationshipMap = new HashMap<>();
@@ -55,7 +55,6 @@
assignment.getSlotToExecutors().entrySet()) {
WorkerSlot slot = entry.getKey();
String nodeId = slot.getNodeId();
- Collection<ExecutorDetails> execs = entry.getValue();
if (!assignmentRelationshipMap.containsKey(nodeId)) {
assignmentRelationshipMap.put(
nodeId, new HashMap<String, Map<String, Collection<ExecutorDetails>>>());
@@ -73,6 +72,7 @@
.get(topId)
.put(slot.getId(), new LinkedList<ExecutorDetails>());
}
+ Collection<ExecutorDetails> execs = entry.getValue();
assignmentRelationshipMap.get(nodeId).get(topId).get(slot.getId()).addAll(execs);
}
}
@@ -90,7 +90,7 @@
}
nodeIdToNode.put(
sup.getId(),
- new RAS_Node(
+ new RasNode(
sup.getId(),
sup,
cluster,
@@ -109,7 +109,7 @@
nodeId,
assignments);
nodeIdToNode.put(
- nodeId, new RAS_Node(nodeId, null, cluster, workerIdToWorker.get(nodeId), assignments));
+ nodeId, new RasNode(nodeId, null, cluster, workerIdToWorker.get(nodeId), assignments));
}
}
return nodeIdToNode;
@@ -118,7 +118,7 @@
/**
* get node object from nodeId.
*/
- public RAS_Node getNodeById(String nodeId) {
+ public RasNode getNodeById(String nodeId) {
return this.nodeMap.get(nodeId);
}
@@ -128,7 +128,7 @@
* @param workerSlots the slots to free
*/
public void freeSlots(Collection<WorkerSlot> workerSlots) {
- for (RAS_Node node : nodeMap.values()) {
+ for (RasNode node : nodeMap.values()) {
for (WorkerSlot ws : node.getUsedSlots()) {
if (workerSlots.contains(ws)) {
LOG.debug("freeing ws {} on node {}", ws, node);
@@ -138,14 +138,14 @@
}
}
- public Collection<RAS_Node> getNodes() {
+ public Collection<RasNode> getNodes() {
return this.nodeMap.values();
}
@Override
public String toString() {
StringBuilder ret = new StringBuilder();
- for (RAS_Node node : nodeMap.values()) {
+ for (RasNode node : nodeMap.values()) {
ret.append(node).append("\n");
}
return ret.toString();
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
index 5de265c..43ef699 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
@@ -123,7 +123,7 @@
List<TopologyDetails> orderedTopologies) {
//A copy of cluster that we can modify, but does not get committed back to cluster unless scheduling succeeds
Cluster workingState = new Cluster(cluster);
- RAS_Nodes nodes = new RAS_Nodes(workingState);
+ RasNodes nodes = new RasNodes(workingState);
IStrategy rasStrategy = null;
String strategyConf = (String) td.getConf().get(Config.TOPOLOGY_SCHEDULER_STRATEGY);
try {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
index 2e1ca79..4cc0a85 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
@@ -137,8 +137,8 @@
}
/**
- * Comparator that sorts topologies by priority and then by submission time
- * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort
+ * Comparator that sorts topologies by priority and then by submission time.
+ * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort.
*/
private static class TopologyByPriorityAndSubmissionTimeComparator implements Comparator<TopologyDetails> {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
index 229f5b5..0076c75 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
@@ -20,6 +20,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
public class FIFOSchedulingPriorityStrategy extends DefaultSchedulingPriorityStrategy {
private static final Logger LOG = LoggerFactory.getLogger(FIFOSchedulingPriorityStrategy.class);
@@ -54,8 +55,8 @@
}
/**
- * Comparator that sorts topologies by priority and then by submission time
- * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort
+ * Comparator that sorts topologies by priority and then by submission time.
+ * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort.
*/
private static class TopologyBySubmissionTimeComparator implements Comparator<TopologyDetails> {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
index 4db19ac..6d75b33 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
@@ -39,8 +39,8 @@
import org.apache.storm.scheduler.SchedulerAssignment;
import org.apache.storm.scheduler.TopologyDetails;
import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
-import org.apache.storm.scheduler.resource.RAS_Nodes;
+import org.apache.storm.scheduler.resource.RasNode;
+import org.apache.storm.scheduler.resource.RasNodes;
import org.apache.storm.scheduler.resource.SchedulingResult;
import org.apache.storm.scheduler.resource.SchedulingStatus;
import org.apache.storm.scheduler.resource.normalization.NormalizedResourceOffer;
@@ -58,14 +58,14 @@
private Map<String, List<String>> networkTopography;
private final Map<String, String> superIdToRack = new HashMap<>();
private final Map<String, String> superIdToHostname = new HashMap<>();
- private final Map<String, List<RAS_Node>> hostnameToNodes = new HashMap<>();
- private final Map<String, List<RAS_Node>> rackIdToNodes = new HashMap<>();
- protected RAS_Nodes nodes;
+ private final Map<String, List<RasNode>> hostnameToNodes = new HashMap<>();
+ private final Map<String, List<RasNode>> rackIdToNodes = new HashMap<>();
+ protected RasNodes nodes;
@VisibleForTesting
void prepare(Cluster cluster) {
this.cluster = cluster;
- nodes = new RAS_Nodes(cluster);
+ nodes = new RasNodes(cluster);
networkTopography = cluster.getNetworkTopography();
Map<String, String> hostToRack = new HashMap<>();
for (Map.Entry<String, List<String>> entry : networkTopography.entrySet()) {
@@ -74,7 +74,7 @@
hostToRack.put(hostName, rackId);
}
}
- for (RAS_Node node: nodes.getNodes()) {
+ for (RasNode node: nodes.getNodes()) {
String superId = node.getId();
String hostName = node.getHostname();
String rackId = hostToRack.getOrDefault(hostName, DNSToSwitchMapping.DEFAULT_RACK);
@@ -86,15 +86,15 @@
logClusterInfo();
}
- protected void setOneExecutorPerWorker(boolean oneExecutorPerWorker) {
- this.oneExecutorPerWorker = oneExecutorPerWorker;
- }
-
@Override
public void prepare(Map<String, Object> config) {
//NOOP
}
+ protected void setOneExecutorPerWorker(boolean oneExecutorPerWorker) {
+ this.oneExecutorPerWorker = oneExecutorPerWorker;
+ }
+
protected SchedulingResult mkNotEnoughResources(TopologyDetails td) {
return SchedulingResult.failure(
SchedulingStatus.FAIL_NOT_ENOUGH_RESOURCES,
@@ -113,7 +113,7 @@
ExecutorDetails exec, TopologyDetails td, Collection<ExecutorDetails> scheduledTasks, Iterable<String> sortedNodes) {
WorkerSlot targetSlot = findWorkerForExec(exec, td, sortedNodes);
if (targetSlot != null) {
- RAS_Node targetNode = idToNode(targetSlot.getNodeId());
+ RasNode targetNode = idToNode(targetSlot.getNodeId());
targetNode.assignSingleExecutor(targetSlot, exec, td);
scheduledTasks.add(exec);
LOG.debug(
@@ -150,7 +150,7 @@
*/
protected WorkerSlot findWorkerForExec(ExecutorDetails exec, TopologyDetails td, Iterable<String> sortedNodes) {
for (String id : sortedNodes) {
- RAS_Node node = nodes.getNodeById(id);
+ RasNode node = nodes.getNodeById(id);
if (node.couldEverFit(exec, td)) {
Collection<WorkerSlot> topologyUsedSlots = oneExecutorPerWorker ? node.getUsedSlots(td.getId()) : Collections.emptySet();
for (WorkerSlot ws : node.getSlotsAvailableToScheduleOn()) {
@@ -183,12 +183,12 @@
* @return a sorted list of nodes.
*/
protected TreeSet<ObjectResources> sortNodes(
- List<RAS_Node> availNodes, ExecutorDetails exec, TopologyDetails topologyDetails, String rackId,
+ List<RasNode> availNodes, ExecutorDetails exec, TopologyDetails topologyDetails, String rackId,
Map<String, AtomicInteger> scheduledCount) {
AllResources allRackResources = new AllResources("RACK");
List<ObjectResources> nodes = allRackResources.objectResources;
- for (RAS_Node rasNode : availNodes) {
+ for (RasNode rasNode : availNodes) {
String superId = rasNode.getId();
ObjectResources node = new ObjectResources(superId);
@@ -226,9 +226,9 @@
}
List<String> ret = new ArrayList<>(hosts.size());
for (String host: hosts) {
- List<RAS_Node> nodes = hostnameToNodes.get(host);
+ List<RasNode> nodes = hostnameToNodes.get(host);
if (nodes != null) {
- for (RAS_Node node : nodes) {
+ for (RasNode node : nodes) {
ret.add(node.getId());
}
}
@@ -365,7 +365,7 @@
ObjectResources rack = new ObjectResources(rackId);
racks.add(rack);
for (String nodeHost : nodeHosts) {
- for (RAS_Node node : hostnameToNodes(nodeHost)) {
+ for (RasNode node : hostnameToNodes(nodeHost)) {
rack.availableResources.add(node.getTotalAvailableResources());
rack.totalResources.add(node.getTotalAvailableResources());
}
@@ -438,7 +438,7 @@
* @param node the node to find out which rack its on
* @return the rack id
*/
- protected String nodeToRack(RAS_Node node) {
+ protected String nodeToRack(RasNode node) {
return superIdToRack.get(node.getId());
}
@@ -586,7 +586,7 @@
String rackId = clusterEntry.getKey();
LOG.debug("Rack: {}", rackId);
for (String nodeHostname : clusterEntry.getValue()) {
- for (RAS_Node node : hostnameToNodes(nodeHostname)) {
+ for (RasNode node : hostnameToNodes(nodeHostname)) {
LOG.debug("-> Node: {} {}", node.getHostname(), node.getId());
LOG.debug(
"--> Avail Resources: {Mem {}, CPU {} Slots: {}}",
@@ -610,18 +610,18 @@
* @param hostname the hostname.
* @return the ids n that node.
*/
- public List<RAS_Node> hostnameToNodes(String hostname) {
+ public List<RasNode> hostnameToNodes(String hostname) {
return hostnameToNodes.getOrDefault(hostname, Collections.emptyList());
}
/**
- * Find RAS_Node for specified node id.
+ * Find RasNode for specified node id.
*
* @param id the node/supervisor id to lookup
- * @return a RAS_Node object
+ * @return a RasNode object
*/
- public RAS_Node idToNode(String id) {
- RAS_Node ret = nodes.getNodeById(id);
+ public RasNode idToNode(String id) {
+ RasNode ret = nodes.getNodeById(id);
if (ret == null) {
LOG.error("Cannot find Node with Id: {}", id);
}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
index 9f8265d..cacba7a 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
@@ -31,8 +31,8 @@
import org.apache.storm.scheduler.SchedulerAssignment;
import org.apache.storm.scheduler.TopologyDetails;
import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
-import org.apache.storm.scheduler.resource.RAS_Nodes;
+import org.apache.storm.scheduler.resource.RasNode;
+import org.apache.storm.scheduler.resource.RasNodes;
import org.apache.storm.scheduler.resource.SchedulingResult;
import org.apache.storm.scheduler.resource.SchedulingStatus;
import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
@@ -49,7 +49,7 @@
private Map<String, Map<String, Integer>> constraintMatrix;
private HashSet<String> spreadComps = new HashSet<>();
- private Map<String, RAS_Node> nodes;
+ private Map<String, RasNode> nodes;
private Map<ExecutorDetails, String> execToComp;
private Map<String, Set<ExecutorDetails>> compToExecs;
private List<String> favoredNodeIds;
@@ -124,9 +124,9 @@
return true;
}
- private static Map<WorkerSlot, RAS_Node> workerToNodes(Cluster cluster) {
- Map<WorkerSlot, RAS_Node> workerToNodes = new HashMap<>();
- for (RAS_Node node : RAS_Nodes.getAllNodesFrom(cluster).values()) {
+ private static Map<WorkerSlot, RasNode> workerToNodes(Cluster cluster) {
+ Map<WorkerSlot, RasNode> workerToNodes = new HashMap<>();
+ for (RasNode node : RasNodes.getAllNodesFrom(cluster).values()) {
for (WorkerSlot s : node.getUsedSlots()) {
workerToNodes.put(s, node);
}
@@ -141,15 +141,15 @@
Map<ExecutorDetails, String> execToComp = topo.getExecutorToComponent();
Map<WorkerSlot, HashSet<ExecutorDetails>> workerExecMap = new HashMap<>();
Map<WorkerSlot, HashSet<String>> workerCompMap = new HashMap<>();
- Map<RAS_Node, HashSet<String>> nodeCompMap = new HashMap<>();
- Map<WorkerSlot, RAS_Node> workerToNodes = workerToNodes(cluster);
+ Map<RasNode, HashSet<String>> nodeCompMap = new HashMap<>();
+ Map<WorkerSlot, RasNode> workerToNodes = workerToNodes(cluster);
boolean ret = true;
HashSet<String> spreadComps = getSpreadComps(topo);
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : result.entrySet()) {
ExecutorDetails exec = entry.getKey();
WorkerSlot worker = entry.getValue();
- RAS_Node node = workerToNodes.get(worker);
+ RasNode node = workerToNodes.get(worker);
if (workerExecMap.computeIfAbsent(worker, (k) -> new HashSet<>()).contains(exec)) {
LOG.error("Incorrect Scheduling: Found duplicate in scheduling");
@@ -177,9 +177,9 @@
LOG.info("Checking Resources...");
assert (cluster.getAssignmentById(topo.getId()) != null);
Map<ExecutorDetails, WorkerSlot> result = cluster.getAssignmentById(topo.getId()).getExecutorToSlot();
- Map<RAS_Node, Collection<ExecutorDetails>> nodeToExecs = new HashMap<>();
+ Map<RasNode, Collection<ExecutorDetails>> nodeToExecs = new HashMap<>();
Map<ExecutorDetails, WorkerSlot> mergedExecToWorker = new HashMap<>();
- Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+ Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
//merge with existing assignments
if (cluster.getAssignmentById(topo.getId()) != null
&& cluster.getAssignmentById(topo.getId()).getExecutorToSlot() != null) {
@@ -190,7 +190,7 @@
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : mergedExecToWorker.entrySet()) {
ExecutorDetails exec = entry.getKey();
WorkerSlot worker = entry.getValue();
- RAS_Node node = nodes.get(worker.getNodeId());
+ RasNode node = nodes.get(worker.getNodeId());
if (node.getAvailableMemoryResources() < 0.0 && node.getAvailableCpuResources() < 0.0) {
LOG.error("Incorrect Scheduling: found node with negative available resources");
@@ -199,8 +199,8 @@
nodeToExecs.computeIfAbsent(node, (k) -> new HashSet<>()).add(exec);
}
- for (Map.Entry<RAS_Node, Collection<ExecutorDetails>> entry : nodeToExecs.entrySet()) {
- RAS_Node node = entry.getKey();
+ for (Map.Entry<RasNode, Collection<ExecutorDetails>> entry : nodeToExecs.entrySet()) {
+ RasNode node = entry.getKey();
Collection<ExecutorDetails> execs = entry.getValue();
double cpuUsed = 0.0;
double memoryUsed = 0.0;
@@ -244,9 +244,9 @@
public SchedulingResult schedule(Cluster cluster, TopologyDetails td) {
prepare(cluster);
LOG.debug("Scheduling {}", td.getId());
- nodes = RAS_Nodes.getAllNodesFrom(cluster);
+ nodes = RasNodes.getAllNodesFrom(cluster);
Map<WorkerSlot, Set<String>> workerCompAssignment = new HashMap<>();
- Map<RAS_Node, Set<String>> nodeCompAssignment = new HashMap<>();
+ Map<RasNode, Set<String>> nodeCompAssignment = new HashMap<>();
int confMaxStateSearch = ObjectReader.getInt(td.getConf().get(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH));
int daemonMaxStateSearch = ObjectReader.getInt(cluster.getConf().get(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_STATE_SEARCH));
@@ -279,7 +279,7 @@
if (existingAssignment != null) {
existingAssignment.getExecutorToSlot().forEach((exec, ws) -> {
String compId = execToComp.get(exec);
- RAS_Node node = nodes.get(ws.getNodeId());
+ RasNode node = nodes.get(ws.getNodeId());
//populate node to component Assignments
nodeCompAssignment.computeIfAbsent(node, (k) -> new HashSet<>()).add(compId);
//populate worker to comp assignments
@@ -337,7 +337,7 @@
Iterable<String> sortedNodes = sortAllNodes(state.td, exec, favoredNodeIds, unFavoredNodeIds);
for (String nodeId: sortedNodes) {
- RAS_Node node = nodes.get(nodeId);
+ RasNode node = nodes.get(nodeId);
for (WorkerSlot workerSlot : node.getSlotsAvailableToScheduleOn()) {
if (isExecAssignmentToWorkerValid(workerSlot, state)) {
state.tryToSchedule(execToComp, node, workerSlot);
@@ -374,7 +374,7 @@
public boolean isExecAssignmentToWorkerValid(WorkerSlot worker, SearcherState state) {
final ExecutorDetails exec = state.currentExec();
//check resources
- RAS_Node node = nodes.get(worker.getNodeId());
+ RasNode node = nodes.get(worker.getNodeId());
if (!node.wouldFit(worker, exec, state.td)) {
LOG.trace("{} would not fit in resources available on {}", exec, worker);
return false;
@@ -481,7 +481,7 @@
private final Map<WorkerSlot, Set<String>> workerCompAssignment;
private final boolean[] okToRemoveFromWorker;
// for the currently tested assignment a Map of the node to the components on it to be able to enforce constraints
- private final Map<RAS_Node, Set<String>> nodeCompAssignment;
+ private final Map<RasNode, Set<String>> nodeCompAssignment;
private final boolean[] okToRemoveFromNode;
// Static State
// The list of all executors (preferably sorted to make assignments simpler).
@@ -499,7 +499,7 @@
// The current executor we are trying to schedule
private int execIndex = 0;
- private SearcherState(Map<WorkerSlot, Set<String>> workerCompAssignment, Map<RAS_Node, Set<String>> nodeCompAssignment,
+ private SearcherState(Map<WorkerSlot, Set<String>> workerCompAssignment, Map<RasNode, Set<String>> nodeCompAssignment,
int maxStatesSearched, long maxTimeMs, List<ExecutorDetails> execs, TopologyDetails td) {
assert !execs.isEmpty();
assert execs != null;
@@ -551,7 +551,7 @@
return execs.get(execIndex);
}
- public void tryToSchedule(Map<ExecutorDetails, String> execToComp, RAS_Node node, WorkerSlot workerSlot) {
+ public void tryToSchedule(Map<ExecutorDetails, String> execToComp, RasNode node, WorkerSlot workerSlot) {
ExecutorDetails exec = currentExec();
String comp = execToComp.get(exec);
LOG.trace("Trying assignment of {} {} to {}", exec, comp, workerSlot);
@@ -561,7 +561,7 @@
node.assignSingleExecutor(workerSlot, exec, td);
}
- public void backtrack(Map<ExecutorDetails, String> execToComp, RAS_Node node, WorkerSlot workerSlot) {
+ public void backtrack(Map<ExecutorDetails, String> execToComp, RasNode node, WorkerSlot workerSlot) {
execIndex--;
if (execIndex < 0) {
throw new IllegalStateException("Internal Error: exec index became negative");
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java b/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
index cfb6bb7..e8f8a94 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
@@ -64,7 +64,7 @@
private int lastReturnedTime = 0;
private int timeoutSeconds = DEFAULT_TIMEOUT_SECS;
private Map<String, Object> lastReturnedValue;
- private URI targetURI = null;
+ private URI targetUri = null;
private JSONParser jsonParser;
private String scheme;
@@ -87,8 +87,8 @@
LOG.error("No URI defined in {} configuration.", DaemonConfig.SCHEDULER_CONFIG_LOADER_URI);
} else {
try {
- targetURI = new URI(uriString);
- scheme = targetURI.getScheme().substring(ARTIFACTORY_SCHEME_PREFIX.length());
+ targetUri = new URI(uriString);
+ scheme = targetUri.getScheme().substring(ARTIFACTORY_SCHEME_PREFIX.length());
} catch (URISyntaxException e) {
LOG.error("Failed to parse uri={}", uriString);
}
@@ -103,7 +103,7 @@
*/
@Override
public Map<String, Object> load(String configKey) {
- if (targetURI == null) {
+ if (targetUri == null) {
return null;
}
@@ -116,25 +116,25 @@
}
try {
- Map<String, Object> raw = loadFromURI(targetURI);
+ Map<String, Object> raw = loadFromUri(targetUri);
if (raw != null) {
return (Map<String, Object>) raw.get(configKey);
}
} catch (Exception e) {
- LOG.error("Failed to load from uri {}", targetURI);
+ LOG.error("Failed to load from uri {}", targetUri);
}
return null;
}
/**
+ * Protected so we can override this in unit tests.
+ *
* @param api null if we are trying to download artifact, otherwise a string to call REST api,
* e.g. "/api/storage"
* @param artifact location of artifact
* @param host Artifactory hostname
* @param port Artifactory port
* @return null on failure or the response string if return code is in 200 range
- *
- * <p>Protected so we can override this in unit tests
*/
protected String doGet(String api, String artifact, String host, Integer port) {
URIBuilder builder = new URIBuilder().setScheme(scheme).setHost(host).setPort(port);
@@ -158,7 +158,7 @@
LOG.debug("About to issue a GET to {}", builder);
HttpGet httpget = new HttpGet(builder.build());
String responseBody;
- responseBody = httpclient.execute(httpget, GETStringResponseHandler.getInstance());
+ responseBody = httpclient.execute(httpget, GetStringResponseHandler.getInstance());
returnValue = responseBody;
} catch (Exception e) {
LOG.error("Received exception while connecting to Artifactory", e);
@@ -196,10 +196,10 @@
LOG.error("got null metadata");
return null;
}
- String downloadURI = (String) json.get("downloadUri");
+ String downloadUri = (String) json.get("downloadUri");
// This means we are pointing at a file.
- if (downloadURI != null) {
+ if (downloadUri != null) {
// Then get it and return the file as string.
String returnValue = doGet(null, location, host, port);
saveInArtifactoryCache(returnValue);
@@ -298,7 +298,7 @@
cacheInitialized = true;
}
- private Map<String, Object> loadFromURI(URI uri) throws IOException {
+ private Map<String, Object> loadFromUri(URI uri) throws IOException {
String host = uri.getHost();
Integer port = uri.getPort();
String location = uri.getPath();
@@ -342,20 +342,22 @@
/**
* A private class used to check the response coming back from httpclient.
*/
- private static class GETStringResponseHandler implements ResponseHandler<String> {
- private static GETStringResponseHandler singleton = null;
+ private static class GetStringResponseHandler implements ResponseHandler<String> {
+ private static GetStringResponseHandler singleton = null;
/**
+ * Get instance.
* @return a singleton httpclient GET response handler
*/
- public static GETStringResponseHandler getInstance() {
+ public static GetStringResponseHandler getInstance() {
if (singleton == null) {
- singleton = new GETStringResponseHandler();
+ singleton = new GetStringResponseHandler();
}
return singleton;
}
/**
+ * Handle response.
* @param response The http response to verify.
* @return null on failure or the response string if return code is in 200 range
*/
@@ -374,6 +376,7 @@
}
private class DirEntryCompare implements Comparator<JSONObject> {
+
@Override
public int compare(JSONObject o1, JSONObject o2) {
return ((String) o1.get("uri")).compareTo((String) o2.get("uri"));
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java b/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
index b6f4b3b..1da3a9b 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
@@ -26,7 +26,7 @@
LoggerFactory.getLogger(DefaultHttpCredentialsPlugin.class);
/**
- * No-op
+ * No-op.
*
* @param topoConf Storm configuration
*/
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java b/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
index b14ce85..ad7daaa 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
@@ -22,11 +22,12 @@
import javax.servlet.http.HttpServletRequest;
/**
- * Interface for handling credentials in an HttpServletRequest
+ * Interface for handling credentials in an HttpServletRequest.
*/
public interface IHttpCredentialsPlugin {
+
/**
- * Invoked once immediately after construction
+ * Invoked once immediately after construction.
*
* @param topoConf Storm configuration
*/
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java b/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
index 2b1c299..31c9f3c 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
@@ -39,7 +39,7 @@
}
/**
- * Construct an HttpServletRequest credential plugin specified by the UI storm configuration
+ * Construct an HttpServletRequest credential plugin specified by the UI storm configuration.
*
* @param conf storm configuration
* @return the plugin
@@ -50,7 +50,7 @@
}
/**
- * Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration
+ * Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration.
*
* @param conf storm configuration
* @return the plugin
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java b/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
index a87d448..1490328 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
@@ -128,9 +128,9 @@
* @param topologyId the topology the credentials are for
*/
public void upsertWorkerTokensInCredsForTopo(Map<String, String> creds, String user, String topologyId) {
- Arrays.stream(WorkerTokenServiceType.values()).filter(type -> shouldRenewWorkerToken(creds, type))
- .forEach(type -> {ClientAuthUtils.setWorkerToken(creds, createOrUpdateTokenFor(type, user, topologyId));
- });
+ Arrays.stream(WorkerTokenServiceType.values())
+ .filter(type -> shouldRenewWorkerToken(creds, type))
+ .forEach(type -> ClientAuthUtils.setWorkerToken(creds, createOrUpdateTokenFor(type, user, topologyId)));
}
@VisibleForTesting
diff --git a/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java b/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
index 05d4edd..79d8046 100644
--- a/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
+++ b/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
@@ -265,8 +265,6 @@
Map win2sid2acked = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, ACKED), TO_STRING);
Map win2sid2failed = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, FAILED), TO_STRING);
Map win2sid2emitted = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, EMITTED), TO_STRING);
- Map win2sid2transferred = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, TRANSFERRED), TO_STRING);
- Map win2sid2compLat = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, COMP_LATENCIES), TO_STRING);
outputStats.put(ACKED, win2sid2acked.get(window));
outputStats.put(FAILED, win2sid2failed.get(window));
@@ -276,6 +274,7 @@
}
outputStats.put(EMITTED, filterSysStreams2Stat(sid2emitted, includeSys));
+ Map win2sid2transferred = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, TRANSFERRED), TO_STRING);
Map<String, Long> sid2transferred = (Map) win2sid2transferred.get(window);
if (sid2transferred == null) {
sid2transferred = new HashMap<>();
@@ -283,6 +282,7 @@
outputStats.put(TRANSFERRED, filterSysStreams2Stat(sid2transferred, includeSys));
outputStats = swapMapOrder(outputStats);
+ Map win2sid2compLat = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, COMP_LATENCIES), TO_STRING);
Map sid2compLat = (Map) win2sid2compLat.get(window);
Map sid2acked = (Map) win2sid2acked.get(window);
mergeMaps(outputStats, aggSpoutStreamsLatAndCount(sid2compLat, sid2acked));
@@ -301,8 +301,6 @@
*/
public static <K, V extends Number> Map<String, Object> aggPreMergeTopoPageBolt(
Map<String, Object> beat, String window, boolean includeSys) {
- Map<String, Object> ret = new HashMap<>();
-
Map<String, Object> subRet = new HashMap<>();
subRet.put(NUM_EXECUTORS, 1);
subRet.put(NUM_TASKS, beat.get(NUM_TASKS));
@@ -334,6 +332,7 @@
subRet.putAll(aggBoltLatAndCount(
win2sid2execLat.get(window), win2sid2procLat.get(window), win2sid2exec.get(window)));
+ Map<String, Object> ret = new HashMap<>();
ret.put((String) beat.get("comp-id"), subRet);
return ret;
}
@@ -343,8 +342,6 @@
*/
public static <K, V extends Number> Map<String, Object> aggPreMergeTopoPageSpout(
Map<String, Object> m, String window, boolean includeSys) {
- Map<String, Object> ret = new HashMap<>();
-
Map<String, Object> subRet = new HashMap<>();
subRet.put(NUM_EXECUTORS, 1);
subRet.put(NUM_TASKS, m.get(NUM_TASKS));
@@ -372,6 +369,7 @@
windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, ACKED), TO_STRING);
subRet.putAll(aggSpoutLatAndCount(win2sid2compLat.get(window), win2sid2acked.get(window)));
+ Map<String, Object> ret = new HashMap<>();
ret.put((String) m.get("comp-id"), subRet);
return ret;
}
@@ -522,8 +520,6 @@
*/
public static Map<String, Object> aggTopoExecStats(
String window, boolean includeSys, Map<String, Object> accStats, Map<String, Object> beat, String compType) {
- Map<String, Object> ret = new HashMap<>();
-
boolean isSpout = compType.equals(ClientStatsUtil.SPOUT);
// component id -> stats
Map<String, Object> cid2stats;
@@ -552,6 +548,7 @@
w2acked = aggregateCountStreams(ClientStatsUtil.getMapByKey(stats, ACKED));
}
+ Map<String, Object> ret = new HashMap<>();
Set workerSet = (Set) accStats.get(WORKERS_SET);
workerSet.add(Lists.newArrayList(beat.get(HOST), beat.get(PORT)));
ret.put(WORKERS_SET, workerSet);
diff --git a/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java b/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
index eafb315..5ad60fb 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
@@ -21,24 +21,24 @@
*/
public class CompleteTopologyParam {
/**
- * The mocked spout sources
+ * The mocked spout sources.
*/
private MockedSources mockedSources = new MockedSources();
/**
- * the config for the topology when it was submitted to the cluster
+ * the config for the topology when it was submitted to the cluster.
*/
private Map<String, Object> topoConf = new Config();
/**
- * whether cleanup the state?
+ * Indicates whether to clean up the state.
*/
private boolean cleanupState = true;
/**
- * the topology name you want to submit to the cluster
+ * the topology name you want to submit to the cluster.
*/
private String topologyName;
/**
- * the timeout of topology you want to submit to the cluster
+ * the timeout of topology you want to submit to the cluster.
*/
private int timeoutMs = Testing.TEST_TIMEOUT_MS;
diff --git a/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java b/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
index c3265df..6539517 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
@@ -34,6 +34,7 @@
}
/**
+ * Get port.
* @return the port ZK is listening on (localhost)
*/
public long getPort() {
diff --git a/storm-server/src/main/java/org/apache/storm/testing/TestJob.java b/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
index 9c118e8..205b429 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
@@ -19,7 +19,7 @@
* we put our java unit testing logic in the run method. A sample
* code will be:
*
- * ```java
+ * <p>```java
* Testing.withSimulatedTimeLocalCluster(new TestJob() {
* public void run(Cluster cluster) {
* // your testing logic here.
diff --git a/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java b/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
index 1c6f738..4719ce6 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
@@ -12,6 +12,8 @@
package org.apache.storm.testing;
+import static org.apache.storm.Testing.whileTimeout;
+
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
@@ -29,8 +31,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.storm.Testing.whileTimeout;
-
/**
* A tracked topology keeps metrics for every bolt and spout.
* This allows a test to know how many tuples have been fully processed.
@@ -79,7 +79,7 @@
}
/**
- * Wait for 1 tuple to be fully processed
+ * Wait for 1 tuple to be fully processed.
*/
public void trackedWait() {
trackedWait(1, Testing.TEST_TIMEOUT_MS);
@@ -100,29 +100,30 @@
final String id = cluster.getTrackedId();
Random rand = ThreadLocalRandom.current();
whileTimeout(timeoutMs,
- () -> {
- int se = globalAmt(id, "spout-emitted");
- int transferred = globalAmt(id, "transferred");
- int processed = globalAmt(id, "processed");
- LOG.info("emitted {} target {} transferred {} processed {}", se, target, transferred, processed);
- return (target != se) || (transferred != processed);
- },
- () -> {
- Time.advanceTimeSecs(1);
- try {
- Thread.sleep(rand.nextInt(200));
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- });
+ () -> {
+ int se = globalAmt(id, "spout-emitted");
+ int transferred = globalAmt(id, "transferred");
+ int processed = globalAmt(id, "processed");
+ LOG.info("emitted {} target {} transferred {} processed {}", se, target, transferred, processed);
+ return (target != se) || (transferred != processed);
+ },
+ () -> {
+ Time.advanceTimeSecs(1);
+ try {
+ Thread.sleep(rand.nextInt(200));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
lastSpoutCommit.set(target);
}
/**
- * Read a metric from the tracked cluster (NOT JUST THIS TOPOLOGY)
+ * Read a metric from the tracked cluster (NOT JUST THIS TOPOLOGY).
* @param key one of "spout-emitted", "processed", or "transferred"
* @return the amount of that metric
*/
+ @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
public int globalAmt(String key) {
return globalAmt(cluster.getTrackedId(), key);
}
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java b/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
index a96ddb6..2c932b8 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
@@ -81,6 +81,10 @@
return ret;
}
+ public static String masterStormDistRoot(Map<String, Object> conf, String stormId) throws IOException {
+ return (masterStormDistRoot(conf) + FILE_SEPARATOR + stormId);
+ }
+
/* TODO: make sure test these two functions in manual tests */
public static List<String> getTopoLogsUsers(Map<String, Object> topologyConf) {
List<String> logsUsers = (List<String>) topologyConf.get(DaemonConfig.LOGS_USERS);
@@ -128,10 +132,6 @@
return ret;
}
- public static String masterStormDistRoot(Map<String, Object> conf, String stormId) throws IOException {
- return (masterStormDistRoot(conf) + FILE_SEPARATOR + stormId);
- }
-
public static String supervisorTmpDir(Map<String, Object> conf) throws IOException {
String ret = ConfigUtils.supervisorLocalDir(conf) + FILE_SEPARATOR + "tmp";
FileUtils.forceMkdir(new File(ret));
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java b/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
index 85ade01..eb72594 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
@@ -126,11 +126,16 @@
return null;
}
- public static BlobStore getNimbusBlobStore(Map<String, Object> conf, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
+ public static BlobStore getNimbusBlobStore(Map<String, Object> conf,
+ NimbusInfo nimbusInfo,
+ ILeaderElector leaderElector) {
return getNimbusBlobStore(conf, null, nimbusInfo, leaderElector);
}
- public static BlobStore getNimbusBlobStore(Map<String, Object> conf, String baseDir, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
+ public static BlobStore getNimbusBlobStore(Map<String, Object> conf,
+ String baseDir,
+ NimbusInfo nimbusInfo,
+ ILeaderElector leaderElector) {
String type = (String)conf.get(DaemonConfig.NIMBUS_BLOBSTORE);
if (type == null) {
type = LocalFsBlobStore.class.getName();
@@ -174,7 +179,7 @@
* @param dir The input dir to get the disk space of this local dir
* @return The total disk space of the input local directory
*/
- public static long getDU(File dir) {
+ public static long getDiskUsage(File dir) {
long size = 0;
if (!dir.exists()) {
return 0;
@@ -192,7 +197,7 @@
isSymLink = true;
}
if (!isSymLink) {
- size += getDU(allFiles[i]);
+ size += getDiskUsage(allFiles[i]);
}
}
}
@@ -214,13 +219,6 @@
/**
* Meant to be called only by the supervisor for stormjar/stormconf/stormcode files.
- *
- * @param key
- * @param localFile
- * @param cb
- * @throws AuthorizationException
- * @throws KeyNotFoundException
- * @throws IOException
*/
public static void downloadResourcesAsSupervisor(String key, String localFile,
ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException, IOException {
@@ -384,8 +382,7 @@
*/
private static void ensureDirectory(File dir) throws IOException {
if (!dir.mkdirs() && !dir.isDirectory()) {
- throw new IOException("Mkdirs failed to create " +
- dir.toString());
+ throw new IOException("Mkdirs failed to create " + dir.toString());
}
}
@@ -394,10 +391,9 @@
* <p/>
* This utility will untar ".tar" files and ".tar.gz","tgz" files.
*
- * @param inFile The tar file as input.
- * @param untarDir The untar directory where to untar the tar file.
- * @param symlinksDisabled true if symlinks should be disabled, else false.
- * @throws IOException
+ * @param inFile The tar file as input
+ * @param untarDir The untar directory where to untar the tar file
+ * @param symlinksDisabled true if symlinks should be disabled, else false
*/
public static void unTar(File inFile, File untarDir, boolean symlinksDisabled) throws IOException {
ensureDirectory(untarDir);
@@ -437,8 +433,10 @@
shexec.execute();
int exitcode = shexec.getExitCode();
if (exitcode != 0) {
- throw new IOException("Error untarring file " + inFile +
- ". Tar process exited with exit code " + exitcode);
+ throw new IOException("Error untarring file "
+ + inFile
+ + ". Tar process exited with exit code "
+ + exitcode);
}
}
@@ -548,18 +546,18 @@
public static void unpack(File localrsrc, File dst, boolean symLinksDisabled) throws IOException {
String lowerDst = localrsrc.getName().toLowerCase();
- if (lowerDst.endsWith(".jar") ||
- lowerDst.endsWith("_jar")) {
+ if (lowerDst.endsWith(".jar")
+ || lowerDst.endsWith("_jar")) {
unJar(localrsrc, dst);
- } else if (lowerDst.endsWith(".zip") ||
- lowerDst.endsWith("_zip")) {
+ } else if (lowerDst.endsWith(".zip")
+ || lowerDst.endsWith("_zip")) {
unZip(localrsrc, dst);
- } else if (lowerDst.endsWith(".tar.gz") ||
- lowerDst.endsWith("_tar_gz") ||
- lowerDst.endsWith(".tgz") ||
- lowerDst.endsWith("_tgz") ||
- lowerDst.endsWith(".tar") ||
- lowerDst.endsWith("_tar")) {
+ } else if (lowerDst.endsWith(".tar.gz")
+ || lowerDst.endsWith("_tar_gz")
+ || lowerDst.endsWith(".tgz")
+ || lowerDst.endsWith("_tgz")
+ || lowerDst.endsWith(".tar")
+ || lowerDst.endsWith("_tar")) {
unTar(localrsrc, dst, symLinksDisabled);
} else {
LOG.warn("Cannot unpack " + localrsrc);
@@ -577,10 +575,10 @@
* Extracts the given file to the given directory. Only zip entries starting with the given prefix are extracted.
* The prefix is stripped off entry names before extraction.
*
- * @param zipFile The zip file to extract.
- * @param toDir The directory to extract to.
+ * @param zipFile The zip file to extract
+ * @param toDir The directory to extract to
* @param prefix The prefix to look for in the zip file. If not null only paths starting with the prefix will be
- * extracted.
+ * extracted
*/
public static void extractZipFile(ZipFile zipFile, File toDir, String prefix) throws IOException {
ensureDirectory(toDir);
@@ -622,8 +620,7 @@
* Given a File input it will unzip the file in a the unzip directory passed as the second parameter.
*
* @param inFile The zip file as input
- * @param toDir The unzip directory where to unzip the zip file.
- * @throws IOException
+ * @param toDir The unzip directory where to unzip the zip file
*/
public static void unZip(File inFile, File toDir) throws IOException {
try (ZipFile zipFile = new ZipFile(inFile)) {
@@ -633,12 +630,10 @@
/**
* Given a zip File input it will return its size Only works for zip files whose uncompressed size is less than 4 GB, otherwise returns
- * the size module 2^32, per gzip specifications
+ * the size modulo 2^32, per gzip specifications.
*
* @param myFile The zip file as input
* @return zip file size as a long
- *
- * @throws IOException
*/
public static long zipFileSize(File myFile) throws IOException {
try (RandomAccessFile raf = new RandomAccessFile(myFile, "r")) {
@@ -685,7 +680,7 @@
* @param conf The configuration
* @return True if it's resource aware; false otherwise
*/
- public static boolean isRAS(Map<String, Object> conf) {
+ public static boolean isRas(Map<String, Object> conf) {
if (conf.containsKey(DaemonConfig.STORM_SCHEDULER)) {
if (conf.get(DaemonConfig.STORM_SCHEDULER).equals("org.apache.storm.scheduler.resource.ResourceAwareScheduler")) {
return true;
@@ -694,7 +689,7 @@
return false;
}
- public static int getEstimatedWorkerCountForRASTopo(Map<String, Object> topoConf, StormTopology topology)
+ public static int getEstimatedWorkerCountForRasTopo(Map<String, Object> topoConf, StormTopology topology)
throws InvalidTopologyException {
Double defaultWorkerMaxHeap = ObjectReader.getDouble(topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768d);
Double topologyWorkerMaxHeap = ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeap);
@@ -755,13 +750,13 @@
public void downloadResourcesAsSupervisorImpl(String key, String localFile,
ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException, IOException {
- final int MAX_RETRY_ATTEMPTS = 2;
- final int ATTEMPTS_INTERVAL_TIME = 100;
- for (int retryAttempts = 0; retryAttempts < MAX_RETRY_ATTEMPTS; retryAttempts++) {
+ final int maxRetryAttempts = 2;
+ final int attemptsIntervalTime = 100;
+ for (int retryAttempts = 0; retryAttempts < maxRetryAttempts; retryAttempts++) {
if (downloadResourcesAsSupervisorAttempt(cb, key, localFile)) {
break;
}
- Utils.sleep(ATTEMPTS_INTERVAL_TIME);
+ Utils.sleep(attemptsIntervalTime);
}
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java b/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
index 6f29552..e47ff10 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
@@ -21,17 +21,17 @@
*/
public class StormCommonInstaller implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(StormCommonInstaller.class);
- private StormCommon _oldInstance;
- private StormCommon _curInstance;
+ private StormCommon oldInstance;
+ private StormCommon curInstance;
public StormCommonInstaller(StormCommon instance) {
- _oldInstance = StormCommon.setInstance(instance);
- _curInstance = instance;
+ oldInstance = StormCommon.setInstance(instance);
+ curInstance = instance;
}
@Override
public void close() throws Exception {
- if (StormCommon.setInstance(_oldInstance) != _curInstance) {
+ if (StormCommon.setInstance(oldInstance) != curInstance) {
throw new IllegalStateException(
"Instances of this resource must be closed in reverse order of opening.");
}
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java b/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
index eb7019e..f25bc3d 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
@@ -21,57 +21,58 @@
public class ZookeeperServerCnxnFactory {
private static final Logger LOG = LoggerFactory.getLogger(ZookeeperServerCnxnFactory.class);
- int _port;
- NIOServerCnxnFactory _factory;
+ int port;
+ NIOServerCnxnFactory factory;
public ZookeeperServerCnxnFactory(int port, int maxClientCnxns) {
//port range
int max;
if (port <= 0) {
- _port = 2000;
+ this.port = 2000;
max = 65535;
} else {
- _port = port;
+ this.port = port;
max = port;
}
try {
- _factory = new NIOServerCnxnFactory();
+ factory = new NIOServerCnxnFactory();
} catch (IOException e) {
- _port = 0;
- _factory = null;
+ this.port = 0;
+ factory = null;
e.printStackTrace();
throw new RuntimeException(e.getMessage());
}
//look for available port
- for (; _port <= max; _port++) {
+ for (; this.port <= max; this.port++) {
try {
- _factory.configure(new InetSocketAddress(_port), maxClientCnxns);
- LOG.debug("Zookeeper server successfully binded at port " + _port);
+ factory.configure(new InetSocketAddress(this.port), maxClientCnxns);
+ LOG.debug("Zookeeper server successfully binded at port " + this.port);
break;
} catch (BindException e1) {
+ //ignore
} catch (IOException e2) {
- _port = 0;
- _factory = null;
+ this.port = 0;
+ factory = null;
e2.printStackTrace();
throw new RuntimeException(e2.getMessage());
}
}
- if (_port > max) {
- _port = 0;
- _factory = null;
+ if (this.port > max) {
+ this.port = 0;
+ factory = null;
LOG.error("Failed to find a port for Zookeeper");
throw new RuntimeException("No port is available to launch an inprocess zookeeper.");
}
}
public int port() {
- return _port;
+ return port;
}
public NIOServerCnxnFactory factory() {
- return _factory;
+ return factory;
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java b/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
index d7b6521..693b585 100644
--- a/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
+++ b/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
@@ -290,8 +290,8 @@
private static boolean equivalent(List<ACL> a, List<ACL> b) {
if (a.size() == b.size()) {
- for (ACL aAcl : a) {
- if (!b.contains(aAcl)) {
+ for (ACL acl : a) {
+ if (!b.contains(acl)) {
return false;
}
}
diff --git a/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java b/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
index 9171038..e62c9e3 100644
--- a/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
+++ b/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
@@ -44,7 +44,7 @@
// tests by subclassing.
private static final Zookeeper INSTANCE = new Zookeeper();
private static Logger LOG = LoggerFactory.getLogger(Zookeeper.class);
- private static Zookeeper _instance = INSTANCE;
+ private static Zookeeper instance = INSTANCE;
/**
* Provide an instance of this class for delegates to use. To mock out delegated methods, provide an instance of a subclass that
@@ -53,7 +53,7 @@
* @param u a Zookeeper instance
*/
public static void setInstance(Zookeeper u) {
- _instance = u;
+ instance = u;
}
/**
@@ -61,12 +61,10 @@
* longer desired.
*/
public static void resetInstance() {
- _instance = INSTANCE;
+ instance = INSTANCE;
}
public static NIOServerCnxnFactory mkInprocessZookeeper(String localdir, Integer port) throws Exception {
- File localfile = new File(localdir);
- ZooKeeperServer zk = new ZooKeeperServer(localfile, localfile, 2000);
NIOServerCnxnFactory factory = null;
int report = 2000;
int limitPort = 65535;
@@ -87,6 +85,8 @@
}
}
LOG.info("Starting inprocess zookeeper at port {} and dir {}", report, localdir);
+ File localfile = new File(localdir);
+ ZooKeeperServer zk = new ZooKeeperServer(localfile, localfile, 2000);
factory.startup(zk);
return factory;
}
@@ -119,7 +119,7 @@
public static ILeaderElector zkLeaderElector(Map<String, Object> conf, CuratorFramework zkClient, BlobStore blobStore,
final TopoCache tc, IStormClusterState clusterState, List<ACL> acls,
StormMetricsRegistry metricsRegistry) {
- return _instance.zkLeaderElectorImpl(conf, zkClient, blobStore, tc, clusterState, acls, metricsRegistry);
+ return instance.zkLeaderElectorImpl(conf, zkClient, blobStore, tc, clusterState, acls, metricsRegistry);
}
protected ILeaderElector zkLeaderElectorImpl(Map<String, Object> conf, CuratorFramework zk, BlobStore blobStore,
diff --git a/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java b/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
index 1c313be..e8bb376 100644
--- a/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
+++ b/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
@@ -107,10 +107,10 @@
new HashMap<>(), ops, "profile");
//null worker id means generate one...
- assertNotNull(mc._workerId);
+ assertNotNull(mc.workerId);
verify(ls).getApprovedWorkers();
Map<String, Integer> expectedNewState = new HashMap<String, Integer>();
- expectedNewState.put(mc._workerId, port);
+ expectedNewState.put(mc.workerId, port);
verify(ls).setApprovedWorkers(expectedNewState);
}
@@ -137,7 +137,7 @@
"SUPERVISOR", supervisorPort, port, la, null, ls, null, new StormMetricsRegistry(),
new HashMap<>(), ops, "profile");
- assertEquals(workerId, mc._workerId);
+ assertEquals(workerId, mc.workerId);
}
@Test
@@ -189,7 +189,7 @@
mc.cleanUp();
- assertNull(mc._workerId);
+ assertNull(mc.workerId);
verify(ls).getApprovedWorkers();
Map<String, Integer> expectedNewState = new HashMap<String, Integer>();
verify(ls).setApprovedWorkers(expectedNewState);
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
index 17b9fa5..d8ea9ae 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
@@ -103,9 +103,9 @@
TopologyDetails topology2 = genTopology("topology2", config, 1, 0, 2, 0, 0, 0, "user");
Topologies topologies = new Topologies(topology1, topology2);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
- Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+ Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
assertEquals(5, nodes.size());
- RAS_Node node = nodes.get("r000s000");
+ RasNode node = nodes.get("r000s000");
assertEquals("r000s000", node.getId());
assertTrue(node.isAlive());
@@ -904,7 +904,7 @@
scheduler.prepare(config);
scheduler.schedule(topologies, cluster);
- Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+ Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
for (SchedulerAssignment assignment : cluster.getAssignments().values()) {
for (Entry<WorkerSlot, WorkerResources> entry : new HashMap<>(assignment.getScheduledResources()).entrySet()) {
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
index fa4ad78..b10ad91 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
@@ -35,7 +35,7 @@
import org.apache.storm.scheduler.Topologies;
import org.apache.storm.scheduler.TopologyDetails;
import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
+import org.apache.storm.scheduler.resource.RasNode;
import org.apache.storm.scheduler.resource.ResourceAwareScheduler;
import org.apache.storm.scheduler.resource.SchedulingResult;
import org.apache.storm.scheduler.resource.strategies.scheduling.BaseResourceAwareStrategy.ObjectResources;
@@ -474,7 +474,7 @@
List<String> nodeHostnames = rackToNodes.get("rack-1");
for (int i = 0; i< topo2.getExecutors().size()/2; i++) {
String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
- RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
+ RasNode node = rs.hostnameToNodes(nodeHostname).get(0);
WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
ExecutorDetails targetExec = executorIterator.next();
// to keep track of free slots
@@ -599,7 +599,7 @@
List<String> nodeHostnames = rackToNodes.get("rack-1");
for (int i = 0; i< topo2.getExecutors().size()/2; i++) {
String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
- RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
+ RasNode node = rs.hostnameToNodes(nodeHostname).get(0);
WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
ExecutorDetails targetExec = executorIterator.next();
// to keep track of free slots