Introduce sandbox config
diff --git a/heron/config/src/yaml/BUILD b/heron/config/src/yaml/BUILD
index 7224cb3..d29d485 100644
--- a/heron/config/src/yaml/BUILD
+++ b/heron/config/src/yaml/BUILD
@@ -23,6 +23,11 @@
 )
 
 filegroup(
+    name = "conf-sandbox-yaml",
+    srcs = glob(["conf/sandbox/*.yaml"]),
+)
+
+filegroup(
     name = "conf-aurora-yaml",
     srcs = glob(["conf/aurora/*"]),
 )
diff --git a/heron/config/src/yaml/conf/sandbox/client.yaml b/heron/config/src/yaml/conf/sandbox/client.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/client.yaml
diff --git a/heron/config/src/yaml/conf/sandbox/healthmgr.yaml b/heron/config/src/yaml/conf/sandbox/healthmgr.yaml
new file mode 100644
index 0000000..a8d3478
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/healthmgr.yaml
@@ -0,0 +1,24 @@
+# Topology health manager mode:
+# disabled = do not launch the health manager
+# cluster = launch the health manager on container-0
+heron.topology.healthmgr.mode: disabled
+
+# Default class and url for providing metrics
+heron.healthmgr.metricsource.type: com.twitter.heron.healthmgr.sensors.TrackerMetricsProvider
+heron.healthmgr.metricsource.url: http://localhost:8888
+
+## list of policies to be executed for self-regulation
+#heron.class.health.policies:
+#  - dynamic-resource-allocation
+#  - auto-restart-backpressure-container
+#
+## configuration specific to individual policies listed above
+#dynamic-resource-allocation:
+#  health.policy.class: com.twitter.heron.healthmgr.policy.DynamicResourceAllocationPolicy
+#  health.policy.interval.ms: 120000
+#  BackPressureDetector.noiseFilterMillis: 20
+#  GrowingWaitQueueDetector.limit: 5
+#auto-restart-backpressure-container:
+#  health.policy.class: com.twitter.heron.healthmgr.policy.AutoRestartBackpressureContainerPolicy
+#  health.policy.interval.ms: 120000
+#  BackPressureDetector.noiseFilterMillis: 20
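
For reference, enabling the health manager in the sandbox amounts to switching the mode to `cluster` and uncommenting one of the policies above. A minimal sketch, using only values that already appear in this file:

```yaml
# launch the health manager on container-0
heron.topology.healthmgr.mode: cluster

heron.healthmgr.metricsource.type: com.twitter.heron.healthmgr.sensors.TrackerMetricsProvider
heron.healthmgr.metricsource.url: http://localhost:8888

heron.class.health.policies:
  - auto-restart-backpressure-container

auto-restart-backpressure-container:
  health.policy.class: com.twitter.heron.healthmgr.policy.AutoRestartBackpressureContainerPolicy
  health.policy.interval.ms: 120000
  BackPressureDetector.noiseFilterMillis: 20
```
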
diff --git a/heron/config/src/yaml/conf/sandbox/heron_internals.yaml b/heron/config/src/yaml/conf/sandbox/heron_internals.yaml
new file mode 100644
index 0000000..56fb659
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/heron_internals.yaml
@@ -0,0 +1,280 @@
+################################################################################
+# Default values for various configs used inside Heron.
+################################################################################
+# All the configs associated with time are in milliseconds,
+# unless otherwise specified.
+################################################################################
+# All the configs associated with data size are in bytes, unless
+# otherwise specified.
+################################################################################
+
+################################################################################
+# System level configs
+################################################################################
+
+### heron.* configs are general configurations shared by all components
+
+# The relative path to the logging directory
+heron.logging.directory: "log-files"
+
+# The maximum log file size in MB
+heron.logging.maximum.size.mb: 100
+
+# The maximum number of log files
+heron.logging.maximum.files: 5
+
+# The interval in seconds at which to check whether the tmaster location has been fetched
+heron.check.tmaster.location.interval.sec: 120
+
+# The interval in seconds to prune logging files in C++
+heron.logging.prune.interval.sec: 300
+
+# The interval in seconds to flush log files in C++
+heron.logging.flush.interval.sec: 10
+
+# The threshold level to log error
+heron.logging.err.threshold: 3
+
+# The interval in seconds for different components to export metrics to metrics manager
+heron.metrics.export.interval.sec: 60
+
+# The maximum count of exceptions in one MetricPublisherPublishMessage protobuf
+heron.metrics.max.exceptions.per.message.count: 1024
+
+################################################################################
+# Configs related to Stream Manager, starts with heron.streammgr.*
+################################################################################
+
+# The tuple cache (used for batching) can be drained in two ways:
+# (a) time based
+# (b) size based
+
+# The frequency in ms to drain the tuple cache in stream manager
+heron.streammgr.cache.drain.frequency.ms: 10
+
+# The size-based threshold in MB for buffering data tuples
+# waiting for checkpoint markers before giving up
+heron.streammgr.stateful.buffer.size.mb: 100
+
+# The size-based threshold in MB for draining the tuple cache
+heron.streammgr.cache.drain.size.mb: 100
+
+# For efficient acknowledgements
+heron.streammgr.xormgr.rotatingmap.nbuckets: 3
+
+# The max number of messages in the memory pool for each message type
+heron.streammgr.mempool.max.message.number: 512
+
+# The max reconnect attempts to other stream managers for stream manager client
+heron.streammgr.client.reconnect.max.attempts: 300
+
+# The reconnect interval to other stream managers in secs for stream manager client
+heron.streammgr.client.reconnect.interval.sec: 1
+
+# The reconnect interval to tmaster in seconds for stream manager client
+heron.streammgr.client.reconnect.tmaster.interval.sec: 10
+
+# The maximum packet size in MB of stream manager's network options
+heron.streammgr.network.options.maximum.packet.mb: 10
+
+# The interval in seconds to send heartbeat
+heron.streammgr.tmaster.heartbeat.interval.sec: 10
+
+# Maximum batch size in MB for the stream manager to read from the socket
+heron.streammgr.connection.read.batch.size.mb: 1
+
+# Maximum batch size in MB for the stream manager to write to the socket
+heron.streammgr.connection.write.batch.size.mb: 1
+
+# The number of times to observe a full buffer while enqueueing data
+# before declaring the start of back pressure
+heron.streammgr.network.backpressure.threshold: 3
+
+# High water mark, in MB, on the data that can be left outstanding on a connection
+heron.streammgr.network.backpressure.highwatermark.mb: 100
+
+# Low water mark, in MB, on the data that can be left outstanding on a connection
+heron.streammgr.network.backpressure.lowwatermark.mb: 50
+
+################################################################################
+# Configs related to Topology Master, starts with heron.tmaster.*
+################################################################################
+
+# The maximum interval in minutes of metrics to be kept in tmaster
+heron.tmaster.metrics.collector.maximum.interval.min: 180
+
+# The maximum number of times to retry establishing the tmaster
+heron.tmaster.establish.retry.times: 30
+
+# The interval in seconds between retries to establish the tmaster
+heron.tmaster.establish.retry.interval.sec: 1
+
+# Maximum packet size in MB of tmaster's network options to connect to stream managers
+heron.tmaster.network.master.options.maximum.packet.mb: 16
+
+# Maximum packet size in MB of tmaster's network options to connect to scheduler
+heron.tmaster.network.controller.options.maximum.packet.mb: 1
+
+# Maximum packet size in MB of tmaster's network options for stat queries
+heron.tmaster.network.stats.options.maximum.packet.mb: 1
+
+# The interval for tmaster to purge metrics from socket
+heron.tmaster.metrics.collector.purge.interval.sec: 60
+
+# The maximum # of exceptions to be stored in the tmaster metrics collector, to prevent potential OOM
+heron.tmaster.metrics.collector.maximum.exception: 256
+
+# Should the metrics reporter bind on all interfaces
+heron.tmaster.metrics.network.bindallinterfaces: False
+
+# The timeout in seconds for stream mgr, compared with (current time - last heartbeat time)
+heron.tmaster.stmgr.state.timeout.sec: 60
+
+################################################################################
+# Configs related to Metrics Manager, starts with heron.metricsmgr.*
+################################################################################
+
+# The size of packets to read from socket will be determined by the minimum of:
+# (a) time based
+# (b) size based
+
+# Time based, the maximum batch time in ms for metricsmgr to read from socket
+heron.metricsmgr.network.read.batch.time.ms: 16
+
+# Size based, the maximum batch size in bytes to read from socket
+heron.metricsmgr.network.read.batch.size.bytes: 32768
+
+# The size of packets to write to socket will be determined by the minimum of
+# (a) time based
+# (b) size based
+
+# Time based, the maximum batch time in ms for metricsmgr to write to socket
+heron.metricsmgr.network.write.batch.time.ms: 16
+
+# Size based, the maximum batch size in bytes to write to socket
+heron.metricsmgr.network.write.batch.size.bytes: 32768
+
+# The maximum socket send buffer size in bytes
+heron.metricsmgr.network.options.socket.send.buffer.size.bytes: 655360
+
+# The maximum socket receive buffer size in bytes of metricsmgr's network options
+heron.metricsmgr.network.options.socket.received.buffer.size.bytes: 873800
+
+# The maximum packet size that metrics mgr can read
+heron.metricsmgr.network.options.maximum.packetsize.bytes: 1048576
+
+################################################################################
+# Configs related to Heron Instance, starts with heron.instance.*
+################################################################################
+
+# The maximum size of packets that instance can read
+heron.instance.network.options.maximum.packetsize.bytes: 10485760
+
+# The queue capacity (num of items) in bolt for buffering packets read from the stream manager
+heron.instance.internal.bolt.read.queue.capacity: 128
+
+# The queue capacity (num of items) in bolt for buffering packets to write to the stream manager
+heron.instance.internal.bolt.write.queue.capacity: 128
+
+# The queue capacity (num of items) in spout for buffering packets read from the stream manager
+heron.instance.internal.spout.read.queue.capacity: 1024
+
+# The queue capacity (num of items) in spout for buffering packets to write to the stream manager
+heron.instance.internal.spout.write.queue.capacity: 128
+
+# The queue capacity (num of items) for buffering metrics packets to write to the metrics manager
+heron.instance.internal.metrics.write.queue.capacity: 128
+
+# The size of packets read from stream manager will be determined by the minimum of
+# (a) time based
+# (b) size based
+
+# Time based, the maximum batch time in ms for instance to read from stream manager per attempt
+heron.instance.network.read.batch.time.ms: 16
+
+# Size based, the maximum batch size in bytes to read from stream manager
+heron.instance.network.read.batch.size.bytes: 32768
+
+# The size of packets written to stream manager will be determined by the minimum of
+# (a) time based
+# (b) size based
+
+# Time based, the maximum batch time in ms for instance to write to stream manager per attempt
+heron.instance.network.write.batch.time.ms: 16
+
+# Size based, the maximum batch size in bytes to write to stream manager
+heron.instance.network.write.batch.size.bytes: 32768
+
+# The maximum socket send buffer size in bytes
+heron.instance.network.options.socket.send.buffer.size.bytes: 6553600
+
+# The maximum socket receive buffer size in bytes of instance's network options
+heron.instance.network.options.socket.received.buffer.size.bytes: 8738000
+
+# The maximum # of data tuples to batch in a HeronDataTupleSet protobuf
+heron.instance.set.data.tuple.capacity: 10240
+
+# The maximum size in bytes of data tuples to batch in a HeronDataTupleSet protobuf
+heron.instance.set.data.tuple.size.bytes: 1310720
+
+# The maximum # of control tuples to batch in a HeronControlTupleSet protobuf
+heron.instance.set.control.tuple.capacity: 1024
+
+# The maximum time in ms for a spout to process acknowledgements per attempt; the ack batch
+# also ends early if there are no more ack tuples to process
+heron.instance.ack.batch.time.ms: 128
+
+# The maximum time in ms for a spout instance to emit tuples per attempt
+heron.instance.emit.batch.time.ms: 160
+
+# The maximum batch size in bytes for a spout to emit tuples per attempt
+heron.instance.emit.batch.size.bytes: 327680
+
+# The maximum time in ms for a bolt instance to execute tuples per attempt
+heron.instance.execute.batch.time.ms: 16
+
+# The maximum batch size in bytes for a bolt instance to execute tuples per attempt
+heron.instance.execute.batch.size.bytes: 32768
+
+# The time to wait before the instance exits forcibly when an uncaught exception occurs
+heron.instance.force.exit.timeout.ms: 2000
+
+# Interval in seconds to reconnect to the stream manager (including the request timeout
+# when connecting), and the maximum number of reconnect attempts
+heron.instance.reconnect.streammgr.interval.sec: 5
+heron.instance.reconnect.streammgr.times: 60
+
+# Interval in seconds to reconnect to the metrics manager (including the request timeout
+# when connecting), and the maximum number of reconnect attempts
+heron.instance.reconnect.metricsmgr.interval.sec: 5
+heron.instance.reconnect.metricsmgr.times: 60
+
+# The interval in seconds for an instance to sample its system metrics, e.g. CPU load
+heron.instance.metrics.system.sample.interval.sec: 10
+
+# For efficient acknowledgement
+heron.instance.acknowledgement.nbuckets: 10
+
+################################################################################
+# For dynamically tuning the available sizes in the internal read & write queues
+# to provide high performance while avoiding GC issues
+################################################################################
+
+# The expected size on read queue in bolt
+heron.instance.tuning.expected.bolt.read.queue.size: 8
+
+# The expected size on write queue in bolt
+heron.instance.tuning.expected.bolt.write.queue.size: 8
+
+# The expected size on read queue in spout
+heron.instance.tuning.expected.spout.read.queue.size: 512
+
+# The expected size on write queue in spout
+heron.instance.tuning.expected.spout.write.queue.size: 8
+
+# The expected size on metrics write queue
+heron.instance.tuning.expected.metrics.write.queue.size: 8
+
+# The weight of the most recent sample when tuning the queue sizes (exponentially weighted moving average)
+heron.instance.tuning.current.sample.weight: 0.8
+
+# Interval in ms at which to tune the sizes of the in & out data queues in the instance
+heron.instance.tuning.interval.ms: 100
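
As the comments above describe, the stream manager drains its tuple cache on whichever of the time-based or size-based thresholds trips first, and back pressure toggles between the high and low watermarks. A hedged sketch of a latency-oriented override (values here are illustrative, not recommendations):

```yaml
# drain the tuple cache more often and in smaller chunks (defaults: 10 ms / 100 MB)
heron.streammgr.cache.drain.frequency.ms: 5
heron.streammgr.cache.drain.size.mb: 50

# back pressure starts once outstanding data exceeds the high watermark
# and is only relieved after it falls below the low watermark
heron.streammgr.network.backpressure.highwatermark.mb: 100
heron.streammgr.network.backpressure.lowwatermark.mb: 50
```
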
diff --git a/heron/config/src/yaml/conf/sandbox/metrics_sinks.yaml b/heron/config/src/yaml/conf/sandbox/metrics_sinks.yaml
new file mode 100644
index 0000000..b2e3e21
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/metrics_sinks.yaml
@@ -0,0 +1,113 @@
+########### These all have default values as shown
+
+# We would specify the unique sink-id first
+sinks:
+  - file-sink
+  - tmaster-sink
+  - metricscache-sink
+
+########### Now we would specify the detailed configuration for every unique sink
+########### Syntax: sink-id: - option(s)
+
+########### option class is required as we need to instantiate a new instance by reflection
+########### option flush-frequency-ms is required to invoke flush() at interval
+########### option sink-restart-attempts, representing the # of times to restart a sink when it throws exceptions and dies.
+###########   If this option is omitted, the default value 0 is used; a negative value restarts it forever.
+
+########### Other options are collected into an immutable map and passed to IMetricsSink's init(Map conf) as an argument.
+########### Values can be fetched with conf.get(option), for instance:
+########### conf.get("class") returns "com.twitter.heron.metricsmgr.sink.FileSink" inside the file-sink instance
+
+### Config for file-sink
+file-sink:
+  class: "com.twitter.heron.metricsmgr.sink.FileSink"
+  flush-frequency-ms: 60000 # 1 min
+  sink-restart-attempts: -1 # Forever
+  filename-output: "metrics.json" # File for metrics to write to
+  file-maximum: 5 # maximum number of files kept on disk
+
+### Config for tmaster-sink
+tmaster-sink:
+  class: "com.twitter.heron.metricsmgr.sink.tmaster.TMasterSink"
+  flush-frequency-ms: 60000
+  sink-restart-attempts: -1 # Forever
+  tmaster-location-check-interval-sec: 5
+  tmaster-client:
+    reconnect-interval-second: 5 # The reconnect interval to TMaster from TMasterClient
+    # The size of packets written to TMaster will be determined by the minimum of: (a) time based (b) size based
+    network-write-batch-size-bytes: 32768 # Size based, the maximum batch size in bytes to write to TMaster
+    network-write-batch-time-ms: 16 # Time based, the maximum batch time in ms for Metrics Manager to write to TMaster per attempt
+    network-read-batch-size-bytes: 32768 # Size based, the maximum batch size in bytes to read from TMaster
+    network-read-batch-time-ms: 16 # Time based, the maximum batch time in ms for Metrics Manager to read from TMaster per attempt
+    socket-send-buffer-size-bytes: 6553600 # The maximum socket send buffer size in bytes
+    socket-received-buffer-size-bytes: 8738000 # The maximum socket receive buffer size in bytes
+  tmaster-metrics-type:
+    "__emit-count": SUM
+    "__execute-count": SUM
+    "__fail-count": SUM
+    "__ack-count": SUM
+    "__complete-latency": AVG
+    "__execute-latency": AVG
+    "__process-latency": AVG
+    "__jvm-uptime-secs": LAST
+    "__jvm-process-cpu-load": LAST
+    "__jvm-memory-used-mb": LAST
+    "__jvm-memory-mb-total": LAST
+    "__jvm-gc-collection-time-ms": LAST
+    "__server/__time_spent_back_pressure_initiated": SUM
+    "__time_spent_back_pressure_by_compid": SUM
+
+### Config for metricscache-sink
+metricscache-sink:
+  class: "com.twitter.heron.metricsmgr.sink.metricscache.MetricsCacheSink"
+  flush-frequency-ms: 60000
+  sink-restart-attempts: -1 # Forever
+  metricscache-location-check-interval-sec: 5
+  metricscache-client:
+    reconnect-interval-second: 5 # The reconnect interval to MetricsCache from MetricsCacheClient
+    # The size of packets written to MetricsCache will be determined by the minimum of: (a) time based (b) size based
+    network-write-batch-size-bytes: 32768 # Size based, the maximum batch size in bytes to write to MetricsCache
+    network-write-batch-time-ms: 16 # Time based, the maximum batch time in ms for Metrics Manager to write to MetricsCache per attempt
+    network-read-batch-size-bytes: 32768 # Size based, the maximum batch size in bytes to read from MetricsCache
+    network-read-batch-time-ms: 16 # Time based, the maximum batch time in ms for Metrics Manager to read from MetricsCache per attempt
+    socket-send-buffer-size-bytes: 6553600 # The maximum socket send buffer size in bytes
+    socket-received-buffer-size-bytes: 8738000 # The maximum socket receive buffer size in bytes
+  metricscache-metrics-type:
+    "__emit-count": SUM
+    "__execute-count": SUM
+    "__fail-count": SUM
+    "__ack-count": SUM
+    "__complete-latency": AVG
+    "__execute-latency": AVG
+    "__process-latency": AVG
+    "__jvm-uptime-secs": LAST
+    "__jvm-process-cpu-load": LAST
+    "__jvm-memory-used-mb": LAST
+    "__jvm-memory-mb-total": LAST
+    "__jvm-gc-collection-time-ms": LAST
+    "__server/__time_spent_back_pressure_initiated": SUM
+    "__time_spent_back_pressure_by_compid": SUM
+
+### Config for scribe-sink
+# scribe-sink:
+#   class: "com.twitter.heron.metricsmgr.sink.ScribeSink"
+#   flush-frequency-ms: 60000
+#   sink-restart-attempts: -1 # Forever
+#   scribe-host: "127.0.0.1" # The host of scribe to be exported metrics to
+#   scribe-port: 1463 # The port of scribe to be exported metrics to
+#   scribe-category: "scribe-category" # The category of the scribe to be exported metrics to
+#   service-namespace: "heron" # The service name of the metrics in scribe-category
+#   scribe-timeout-ms: 200 # The timeout in ms for metrics manager to write metrics to scribe
+#   scribe-connect-server-attempts: 2 # The maximum retry attempts to connect to scribe server
+#   scribe-retry-attempts: 5 # The maximum retry attempts to write metrics to scribe
+#   scribe-retry-interval-ms: 100 # The interval to retry to write metrics to scribe
+
+### Config for graphite-sink
+### Currently the graphite-sink is disabled
+# graphite-sink:
+#   class: "com.twitter.heron.metricsmgr.sink.GraphiteSink"
+#   flush-frequency-ms: 60000
+#   graphite_host: "127.0.0.1" # The host of graphite to be exported metrics to
+#   graphite_port: 2004 # The port of graphite to be exported metrics to
+#   metrics_prefix: "heron" # The prefix of every metrics
+#   server_max_reconnect-attempts: 20 # The max reconnect attempts when failing to connect to graphite server
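
Per the contract spelled out at the top of this file, wiring in an additional sink takes two steps: add its sink-id to the `sinks` list and give it a stanza with at least the required `class` and `flush-frequency-ms` options. A sketch with a hypothetical sink (`com.example.metrics.MyCustomSink` is illustrative, not part of Heron):

```yaml
sinks:
  - file-sink
  - tmaster-sink
  - metricscache-sink
  - my-custom-sink

my-custom-sink:
  class: "com.example.metrics.MyCustomSink" # hypothetical class, instantiated by reflection
  flush-frequency-ms: 60000                 # required; interval at which flush() is invoked
  sink-restart-attempts: 3                  # optional; defaults to 0, negative means forever
  # any remaining options land in the immutable map passed to IMetricsSink's init(Map conf)
  custom-endpoint: "http://localhost:9999"  # hypothetical option read back via conf.get()
```
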
diff --git a/heron/config/src/yaml/conf/sandbox/packing.yaml b/heron/config/src/yaml/conf/sandbox/packing.yaml
new file mode 100644
index 0000000..076976a
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/packing.yaml
@@ -0,0 +1,3 @@
+# packing algorithm for packing instances into containers
+heron.class.packing.algorithm:    com.twitter.heron.packing.roundrobin.RoundRobinPacking
+heron.class.repacking.algorithm:  com.twitter.heron.packing.binpacking.FirstFitDecreasingPacking
\ No newline at end of file
diff --git a/heron/config/src/yaml/conf/sandbox/scheduler.yaml b/heron/config/src/yaml/conf/sandbox/scheduler.yaml
new file mode 100644
index 0000000..ca6e8b8
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/scheduler.yaml
@@ -0,0 +1,11 @@
+# scheduler class for distributing the topology for execution
+heron.class.scheduler:                       com.twitter.heron.scheduler.local.LocalScheduler
+
+# launcher class for submitting and launching the topology
+heron.class.launcher:                        com.twitter.heron.scheduler.local.LocalLauncher
+
+# working directory for the topologies
+heron.scheduler.local.working.directory:     ${HOME}/.herondata/topologies/${CLUSTER}/${ROLE}/${TOPOLOGY}
+
+# location of java - pick it up from shell environment
+heron.directory.sandbox.java.home:           ${JAVA_HOME}
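
The `${...}` tokens are substituted at runtime from the environment and the submit context. As an illustration (names assumed, not prescribed): with HOME=/home/alice, cluster `sandbox`, role `alice`, and a topology named `WordCountTopology`, the working directory above resolves to:

```yaml
heron.scheduler.local.working.directory: /home/alice/.herondata/topologies/sandbox/alice/WordCountTopology
```
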
diff --git a/heron/config/src/yaml/conf/sandbox/stateful.yaml b/heron/config/src/yaml/conf/sandbox/stateful.yaml
new file mode 100644
index 0000000..8b46317
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/stateful.yaml
@@ -0,0 +1,21 @@
+# indicates that checkpoints are stored on the local file system
+heron.statefulstorage.classname:            "com.twitter.heron.statefulstorage.localfs.LocalFileSystemStorage"
+
+heron.statefulstorage.config:
+  heron.statefulstorage.classpath:           ""
+  heron.statefulstorage.localfs.root.path:   ~/.herondata/checkpoints
+
+# The following configs are for the socket between the checkpoint manager and stateful storage
+heron.ckptmgr.network.write.batch.size.bytes: 32768
+
+heron.ckptmgr.network.write.batch.time.ms: 16
+
+heron.ckptmgr.network.read.batch.size.bytes: 32768
+
+heron.ckptmgr.network.read.batch.time.ms: 16
+
+heron.ckptmgr.network.options.socket.send.buffer.size.bytes: 655360
+
+heron.ckptmgr.network.options.socket.receive.buffer.size.bytes: 655360
+
+heron.ckptmgr.network.options.maximum.packetsize.bytes: 10485760
diff --git a/heron/config/src/yaml/conf/sandbox/statemgr.yaml b/heron/config/src/yaml/conf/sandbox/statemgr.yaml
new file mode 100644
index 0000000..e8cc375
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/statemgr.yaml
@@ -0,0 +1,12 @@
+# local state manager class for managing state in a persistent fashion
+heron.class.state.manager:                      com.twitter.heron.statemgr.localfs.LocalFileSystemStateManager
+
+# local state manager connection string
+heron.statemgr.connection.string:               LOCALMODE
+
+# root path under which state is stored on the local file system
+heron.statemgr.root.path:                       ${HOME}/.herondata/repository/state/${CLUSTER}
+
+# create the subdirectories, if needed
+heron.statemgr.localfs.is.initialize.file.tree: True
+
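
The sandbox deliberately pairs the local filesystem state manager with the placeholder connection string LOCALMODE. For comparison, a distributed deployment would typically swap in the ZooKeeper-backed state manager; a sketch, assuming a ZooKeeper ensemble at 127.0.0.1:2181 (address and root path illustrative):

```yaml
heron.class.state.manager:        com.twitter.heron.statemgr.zookeeper.curator.CuratorStateManager
heron.statemgr.connection.string: "127.0.0.1:2181"
heron.statemgr.root.path:         "/heron"
```
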
diff --git a/heron/config/src/yaml/conf/sandbox/uploader.yaml b/heron/config/src/yaml/conf/sandbox/uploader.yaml
new file mode 100644
index 0000000..6982612
--- /dev/null
+++ b/heron/config/src/yaml/conf/sandbox/uploader.yaml
@@ -0,0 +1,5 @@
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:                            com.twitter.heron.uploader.localfs.LocalFileSystemUploader
+
+# name of the directory to upload topologies for local file system uploader
+heron.uploader.localfs.file.system.directory:    ${HOME}/.herondata/repository/topologies/${CLUSTER}/${ROLE}/${TOPOLOGY}