Push logging server updates to master
diff --git a/Vagrantfile b/Vagrantfile
index f4a738c..d7651ef 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -25,7 +25,7 @@
end
config.vm.define "elk" do |elk|
- elk.vm.box = "ubuntu/trusty64"
+ elk.vm.box = "ubuntu/trusty64"
# Change the default elk vagrant box folder to point to the
# elk directory within the project. This will allow separation between
@@ -38,7 +38,7 @@
# boxes.
# - Expose the following ports to be used within this box to
# host data being sent between the web server and the ELK server.
- elk.vm.network "private_network", ip: "172.16.1.100"
+ elk.vm.network "private_network", ip: "172.16.1.100"
# Provisioner: Runs the provisioning script that will provision
# the vagrant box for the first time, or forced.
@@ -63,19 +63,18 @@
# Specify a base virtual machine that is based on Ubuntu Trusty Tahr
dev.vm.box = "ubuntu/trusty64"
- # Setup a static IP to allow both vagrant boxes to know where
- # to contact each other. This will allow communication between the
- # web developer and the logging server.
- dev.vm.network "private_network", ip: "172.16.1.10"
-
- # Specify the provisioning script that will be used in order to
- # install the necessary files needed for this vagrant box
- dev.vm.provision "shell", inline: "twistd -y /vagrant/twisted_client.py &",
- run: "always"
-
# Change the default client vagrant box folder to point to the
# client directory within the project. This will allow separation between
# client and server folders.
dev.vm.synced_folder "client/", "/vagrant"
+
+ # Setup a static IP to allow both vagrant boxes to know where
+ # to contact each other. This will allow communication between the
+ # web developer and the logging server.
+ dev.vm.network "private_network", ip: "172.16.1.10"
+
+ # Specify the provisioning script that will be used in order to
+ # install the necessary files needed for this vagrant box
+ dev.vm.provision "shell", inline: "twistd -y /vagrant/twisted_client.py &", run: "always"
end
end
diff --git a/client/www/js/logging.js b/client/www/js/logging.js
index 5634f3a..d88c03f 100644
--- a/client/www/js/logging.js
+++ b/client/www/js/logging.js
@@ -49,7 +49,7 @@
var ale2 = new userale(
{
- loggingUrl: 'http://192.168.1.100',
+ loggingUrl: 'http://172.16.1.100',
toolName: 'userale-test',
toolVersion: '3.0.0',
elementGroups: [
diff --git a/dashboard/files/config/elasticsearch.yml b/dashboard/files/config/elasticsearch.yml
new file mode 100644
index 0000000..fffdb9e
--- /dev/null
+++ b/dashboard/files/config/elasticsearch.yml
@@ -0,0 +1,115 @@
+#
+# Copyright 2014 The Charles Stark Draper Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ======================== Elasticsearch Configuration =========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+# Before you set out to tweak and tune the configuration, make sure you
+# understand what are you trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
+#
+# Please see the documentation for further information on configuration options:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
+#
+# ---------------------------------- Cluster -----------------------------------
+#
+# Use a descriptive name for your cluster:
+#
+# cluster.name: my-application
+#
+# ------------------------------------ Node ------------------------------------
+#
+# Use a descriptive name for the node:
+#
+# node.name: node-1
+#
+# Add custom attributes to the node:
+#
+# node.rack: r1
+#
+# ----------------------------------- Paths ------------------------------------
+#
+# Path to directory where to store the data (separate multiple locations by comma):
+#
+# path.data: /path/to/data
+#
+# Path to log files:
+#
+# path.logs: /path/to/logs
+#
+# ----------------------------------- Memory -----------------------------------
+#
+# Lock the memory on startup:
+#
+# bootstrap.mlockall: true
+#
+# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
+# available on the system and that the owner of the process is allowed to use this limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+#
+# network.host: "172.16.1.100"
+#
+# Set a custom port for HTTP:
+#
+# http.port: 9200
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
+#
+# --------------------------------- Discovery ----------------------------------
+#
+# Pass an initial list of hosts to perform discovery when new node is started:
+# The default list of hosts is ["127.0.0.1", "[::1]"]
+#
+# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
+#
+# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
+#
+# discovery.zen.minimum_master_nodes: 3
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+# gateway.recover_after_nodes: 3
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
+#
+# ---------------------------------- Various -----------------------------------
+#
+# Disable starting multiple nodes on a single system:
+#
+# node.max_local_storage_nodes: 1
+#
+# Require explicit names when deleting indices:
+#
+# action.destructive_requires_name: true
+http.cors.enabled: true
+http.cors.allow-origin: "*"
+# networking
+# network.bind_host: 0
+# network.publish_host: 0.0.0.0
+network.host: 0.0.0.0
\ No newline at end of file
diff --git a/dashboard/files/config/kibana.yml b/dashboard/files/config/kibana.yml
new file mode 100644
index 0000000..2151e64
--- /dev/null
+++ b/dashboard/files/config/kibana.yml
@@ -0,0 +1,95 @@
+#
+# Copyright 2014 The Charles Stark Draper Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ======================== Kibana Configuration =========================
+# Kibana is served by a back end server. This controls which port to use.
+# server.port: 5601
+
+# The host to bind the server to.
+# server.host: "localhost"
+
+# If you are running kibana behind a proxy, and want to mount it at a path,
+# specify that path here. The basePath can't end in a slash.
+# server.basePath: ""
+
+# The maximum payload size in bytes on incoming server requests.
+# server.maxPayloadBytes: 1048576
+
+# The Elasticsearch instance to use for all your queries.
+# elasticsearch.url: "http://localhost:9200"
+
+# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
+# then the host you use to connect to *this* Kibana instance will be sent.
+# elasticsearch.preserveHost: true
+
+# Kibana uses an index in Elasticsearch to store saved searches, visualizations
+# and dashboards. It will create a new index if it doesn't already exist.
+# kibana.index: ".kibana"
+
+# The default application to load.
+# kibana.defaultAppId: "discover"
+
+# If your Elasticsearch is protected with basic auth, these are the user credentials
+# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
+# users will still need to authenticate with Elasticsearch (which is proxied through
+# the Kibana server)
+# elasticsearch.username: "user"
+# elasticsearch.password: "pass"
+
+# SSL for outgoing requests from the Kibana Server to the browser (PEM formatted)
+# server.ssl.cert: /path/to/your/server.crt
+# server.ssl.key: /path/to/your/server.key
+
+# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
+# elasticsearch.ssl.cert: /path/to/your/client.crt
+# elasticsearch.ssl.key: /path/to/your/client.key
+
+# If you need to provide a CA certificate for your Elasticsearch instance, put
+# the path of the pem file here.
+# elasticsearch.ssl.ca: /path/to/your/CA.pem
+
+# Set to false to have a complete disregard for the validity of the SSL
+# certificate.
+# elasticsearch.ssl.verify: true
+
+# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
+# request_timeout setting
+# elasticsearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or elasticsearch.
+# This must be > 0
+# elasticsearch.requestTimeout: 300000
+
+# Time in milliseconds for Elasticsearch to wait for responses from shards.
+# Set to 0 to disable.
+# elasticsearch.shardTimeout: 0
+
+# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
+# elasticsearch.startupTimeout: 5000
+
+# Set the path to where you would like the process id file to be created.
+# pid.file: /var/run/kibana.pid
+
+# If you would like to send the log output to a file you can set the path below.
+# logging.dest: stdout
+
+# Set this to true to suppress all logging output.
+# logging.silent: false
+
+# Set this to true to suppress all logging output except for error messages.
+# logging.quiet: false
+
+# Set this to true to log all events, including system usage information and all requests.
+# logging.verbose: false
diff --git a/dashboard/files/xdata.conf b/dashboard/files/config/xdata.conf
similarity index 60%
rename from dashboard/files/xdata.conf
rename to dashboard/files/config/xdata.conf
index 52ac3e9..b007cbe 100644
--- a/dashboard/files/xdata.conf
+++ b/dashboard/files/config/xdata.conf
@@ -1,67 +1,85 @@
-#
-# Copyright 2014 The Charles Stark Draper Laboratory
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-input {
- file {
- codec => "json"
- path => [
- "/var/log/xdata/xdata-old.log",
- "/var/log/xdata/xdata-v2.log",
- "/var/log/xdata/xdata-v3.log"
- ]
- start_position => beginning
- }
-}
-
-filter {
- grok {
- match => [ "apiVersion", "(?<major_ver>\d+).(?<minor_ver>\d+)(.(?<patch_ver>\d+))?" ]
- }
-
- grok {
- match => [ "useraleVersion", "(?<major_ver>\d+).(?<minor_ver>\d+)(.(?<patch_ver>\d+))?" ]
- }
-
- mutate {
- convert => { "major_ver" => "integer" }
- convert => { "minor_ver" => "integer" }
- convert => { "patch_ver" => "integer" }
- }
-}
-
-output {
- if [oid] {
- elasticsearch {
- index => xdata_old
- host => localhost
- index_type => testing
- }
- } else if [major_ver] > 2 {
- elasticsearch {
- index => xdata_v3
- host => localhost
- index_type => testing
- }
- } else {
- elasticsearch {
- index => xdata_v2
- host => localhost
- index_type => testing
- }
- }
- stdout { codec => rubydebug }
-}
-
+#
+# Copyright 2014 The Charles Stark Draper Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input {
+ # Accept logs from file
+ file {
+ codec => "json"
+ path => [
+ "/var/log/xdata/xdata-old.log",
+ "/var/log/xdata/xdata-v2.log",
+ "/var/log/xdata/xdata-v3.log"
+ ]
+ start_position => beginning
+ #sincedb_path => "/dev/null"
+ }
+}
+
+filter {
+ grok {
+ match => [ "apiVersion", "(?<major_ver>\d+).(?<minor_ver>\d+)(.(?<patch_ver>\d+))?" ]
+ }
+
+ grok {
+ match => [ "useraleVersion", "(?<major_ver>\d+).(?<minor_ver>\d+)(.(?<patch_ver>\d+))?" ]
+ }
+
+ mutate {
+ convert => { "major_ver" => "integer" }
+ convert => { "minor_ver" => "integer" }
+ convert => { "patch_ver" => "integer" }
+ }
+}
+
+output {
+ # Output to Elasticsearch instance (depending on version number)
+ if [oid] {
+ elasticsearch {
+ hosts => ["localhost"]
+ action => "index"
+ index => "xdata_old"
+ document_type => "testing"
+ #template_overwrite => true
+ #template => "/vagrant/files/templates/testing_old.json"
+ #template_name => "testing-old"
+ }
+ } else if [major_ver] > 2 {
+ elasticsearch {
+ hosts => ["localhost"]
+ action => "index"
+ index => "xdata_v3"
+ document_type => "testing"
+ #template_overwrite => true
+ #template => "/vagrant/files/templates/testing_v3.json"
+ #template_name => "testing"
+ }
+ } else {
+ elasticsearch {
+ hosts => ["localhost"]
+ action => "index"
+ index => "xdata_v2"
+ document_type => "testing"
+ #template_overwrite => true
+ #template => "/vagrant/files/templates/testing_v2.json"
+ #template_name => "testing"
+ }
+ }
+
+ # Ship logs to Database
+
+ # Debug
+ stdout { codec => rubydebug }
+}
\ No newline at end of file
diff --git a/dashboard/files/XDATA-Dashboard-Old.json b/dashboard/files/data/XDATA-Dashboard-Old.json
similarity index 100%
rename from dashboard/files/XDATA-Dashboard-Old.json
rename to dashboard/files/data/XDATA-Dashboard-Old.json
diff --git a/dashboard/files/XDATA-Dashboard-v2.json b/dashboard/files/data/XDATA-Dashboard-v2.json
similarity index 100%
rename from dashboard/files/XDATA-Dashboard-v2.json
rename to dashboard/files/data/XDATA-Dashboard-v2.json
diff --git a/dashboard/files/XDATA-Dashboard-v3.json b/dashboard/files/data/XDATA-Dashboard-v3.json
similarity index 100%
rename from dashboard/files/XDATA-Dashboard-v3.json
rename to dashboard/files/data/XDATA-Dashboard-v3.json
diff --git a/dashboard/files/xdata-old.log b/dashboard/files/data/xdata-old.log
similarity index 100%
rename from dashboard/files/xdata-old.log
rename to dashboard/files/data/xdata-old.log
diff --git a/dashboard/files/elasticsearch.yml b/dashboard/files/elasticsearch.yml
deleted file mode 100644
index 3cdc70e..0000000
--- a/dashboard/files/elasticsearch.yml
+++ /dev/null
@@ -1,403 +0,0 @@
-#
-# Copyright 2014 The Charles Stark Draper Laboratory
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##################### Elasticsearch Configuration Example #####################
-
-# This file contains an overview of various configuration settings,
-# targeted at operations staff. Application developers should
-# consult the guide at <http://elasticsearch.org/guide>.
-#
-# The installation procedure is covered at
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
-#
-# Elasticsearch comes with reasonable defaults for most settings,
-# so you can try it out without bothering with configuration.
-#
-# Most of the time, these defaults are just fine for running a production
-# cluster. If you're fine-tuning your cluster, or wondering about the
-# effect of certain configuration option, please _do ask_ on the
-# mailing list or IRC channel [http://elasticsearch.org/community].
-
-# Any element in the configuration can be replaced with environment variables
-# by placing them in ${...} notation. For example:
-#
-#node.rack: ${RACK_ENV_VAR}
-
-# For information on supported formats and syntax for the config file, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
-
-
-################################### Cluster ###################################
-
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names.
-#
-#cluster.name: elasticsearch
-
-
-#################################### Node #####################################
-
-# Node names are generated dynamically on startup, so you're relieved
-# from configuring them manually. You can tie this node to a specific name:
-#
-#node.name: "Franz Kafka"
-
-# Every node can be configured to allow or deny being eligible as the master,
-# and to allow or deny to store the data.
-#
-# Allow this node to be eligible as a master node (enabled by default):
-#
-#node.master: true
-#
-# Allow this node to store data (enabled by default):
-#
-#node.data: true
-
-# You can exploit these settings to design advanced cluster topologies.
-#
-# 1. You want this node to never become a master node, only to hold data.
-# This will be the "workhorse" of your cluster.
-#
-#node.master: false
-#node.data: true
-#
-# 2. You want this node to only serve as a master: to not store any data and
-# to have free resources. This will be the "coordinator" of your cluster.
-#
-#node.master: true
-#node.data: false
-#
-# 3. You want this node to be neither master nor data node, but
-# to act as a "search load balancer" (fetching data from nodes,
-# aggregating results, etc.)
-#
-#node.master: false
-#node.data: false
-
-# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
-# Node Info API [http://localhost:9200/_nodes] or GUI tools
-# such as <http://www.elasticsearch.org/overview/marvel/>,
-# <http://github.com/karmi/elasticsearch-paramedic>,
-# <http://github.com/lukas-vlcek/bigdesk> and
-# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
-
-# A node can have generic attributes associated with it, which can later be used
-# for customized shard allocation filtering, or allocation awareness. An attribute
-# is a simple key value pair, similar to node.key: value, here is an example:
-#
-#node.rack: rack314
-
-# By default, multiple nodes are allowed to start from the same installation location
-# to disable it, set the following:
-#node.max_local_storage_nodes: 1
-
-
-#################################### Index ####################################
-
-# You can set a number of options (such as shard/replica options, mapping
-# or analyzer definitions, translog settings, ...) for indices globally,
-# in this file.
-#
-# Note, that it makes more sense to configure index settings specifically for
-# a certain index, either when creating it or by using the index templates API.
-#
-# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
-# for more information.
-
-# Set the number of shards (splits) of an index (5 by default):
-#
-#index.number_of_shards: 5
-
-# Set the number of replicas (additional copies) of an index (1 by default):
-#
-#index.number_of_replicas: 1
-
-# Note, that for development on a local machine, with small indices, it usually
-# makes sense to "disable" the distributed features:
-#
-#index.number_of_shards: 1
-#index.number_of_replicas: 0
-
-# These settings directly affect the performance of index and search operations
-# in your cluster. Assuming you have enough machines to hold shards and
-# replicas, the rule of thumb is:
-#
-# 1. Having more *shards* enhances the _indexing_ performance and allows to
-# _distribute_ a big index across machines.
-# 2. Having more *replicas* enhances the _search_ performance and improves the
-# cluster _availability_.
-#
-# The "number_of_shards" is a one-time setting for an index.
-#
-# The "number_of_replicas" can be increased or decreased anytime,
-# by using the Index Update Settings API.
-#
-# Elasticsearch takes care about load balancing, relocating, gathering the
-# results from nodes, etc. Experiment with different settings to fine-tune
-# your setup.
-
-# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
-# the index status.
-
-
-#################################### Paths ####################################
-
-# Path to directory containing configuration (this file and logging.yml):
-#
-#path.conf: /path/to/conf
-
-# Path to directory where to store index data allocated for this node.
-#
-#path.data: /path/to/data
-#
-# Can optionally include more than one location, causing data to be striped across
-# the locations (a la RAID 0) on a file level, favouring locations with most free
-# space on creation. For example:
-#
-#path.data: /path/to/data1,/path/to/data2
-
-# Path to temporary files:
-#
-#path.work: /path/to/work
-
-# Path to log files:
-#
-#path.logs: /path/to/logs
-
-# Path to where plugins are installed:
-#
-#path.plugins: /path/to/plugins
-
-
-#################################### Plugin ###################################
-
-# If a plugin listed here is not installed for current node, the node will not start.
-#
-#plugin.mandatory: mapper-attachments,lang-groovy
-
-
-################################### Memory ####################################
-
-# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
-# it _never_ swaps.
-#
-# Set this property to true to lock the memory:
-#
-#bootstrap.mlockall: true
-
-# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
-# to the same value, and that the machine has enough memory to allocate
-# for Elasticsearch, leaving enough memory for the operating system itself.
-#
-# You should also make sure that the Elasticsearch process is allowed to lock
-# the memory, eg. by using `ulimit -l unlimited`.
-
-
-############################## Network And HTTP ###############################
-
-# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
-# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
-# communication. (the range means that if the port is busy, it will automatically
-# try the next port).
-
-# Set the bind address specifically (IPv4 or IPv6):
-#
-#network.bind_host: 192.168.0.1
-
-# Set the address other nodes will use to communicate with this node. If not
-# set, it is automatically derived. It must point to an actual IP address.
-#
-#network.publish_host: 192.168.0.1
-
-# Set both 'bind_host' and 'publish_host':
-#
-#network.host: 192.168.0.1
-
-# Set a custom port for the node to node communication (9300 by default):
-#
-#transport.tcp.port: 9300
-
-# Enable compression for all communication between nodes (disabled by default):
-#
-#transport.tcp.compress: true
-
-# Set a custom port to listen for HTTP traffic:
-#
-#http.port: 9200
-
-# Set a custom allowed content length:
-#
-#http.max_content_length: 100mb
-
-# Disable HTTP completely:
-#
-#http.enabled: false
-
-
-################################### Gateway ###################################
-
-# The gateway allows for persisting the cluster state between full cluster
-# restarts. Every change to the state (such as adding an index) will be stored
-# in the gateway, and when the cluster starts up for the first time,
-# it will read its state from the gateway.
-
-# There are several types of gateway implementations. For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
-
-# The default gateway type is the "local" gateway (recommended):
-#
-#gateway.type: local
-
-# Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible when using shared
-# gateway).
-
-# Allow recovery process after N nodes in a cluster are up:
-#
-#gateway.recover_after_nodes: 1
-
-# Set the timeout to initiate the recovery process, once the N nodes
-# from previous setting are up (accepts time value):
-#
-#gateway.recover_after_time: 5m
-
-# Set how many nodes are expected in this cluster. Once these N nodes
-# are up (and recover_after_nodes is met), begin recovery process immediately
-# (without waiting for recover_after_time to expire):
-#
-#gateway.expected_nodes: 2
-
-
-############################# Recovery Throttling #############################
-
-# These settings allow to control the process of shards allocation between
-# nodes during initial recovery, replica allocation, rebalancing,
-# or when adding and removing nodes.
-
-# Set the number of concurrent recoveries happening on a node:
-#
-# 1. During the initial recovery
-#
-#cluster.routing.allocation.node_initial_primaries_recoveries: 4
-#
-# 2. During adding/removing nodes, rebalancing, etc
-#
-#cluster.routing.allocation.node_concurrent_recoveries: 2
-
-# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
-#
-#indices.recovery.max_bytes_per_sec: 20mb
-
-# Set to limit the number of open concurrent streams when
-# recovering a shard from a peer:
-#
-#indices.recovery.concurrent_streams: 5
-
-
-################################## Discovery ##################################
-
-# Discovery infrastructure ensures nodes can be found within a cluster
-# and master node is elected. Multicast discovery is the default.
-
-# Set to ensure a node sees N other master eligible nodes to be considered
-# operational within the cluster. This should be set to a quorum/majority of
-# the master-eligible nodes in the cluster.
-#
-#discovery.zen.minimum_master_nodes: 1
-
-# Set the time to wait for ping responses from other nodes when discovering.
-# Set this option to a higher value on a slow or congested network
-# to minimize discovery failures:
-#
-#discovery.zen.ping.timeout: 3s
-
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
-
-# Unicast discovery allows to explicitly control which nodes will be used
-# to discover the cluster. It can be used when multicast is not present,
-# or to restrict the cluster communication-wise.
-#
-# 1. Disable multicast discovery (enabled by default):
-#
-#discovery.zen.ping.multicast.enabled: false
-#
-# 2. Configure an initial list of master nodes in the cluster
-# to perform discovery when new nodes (master or data) are started:
-#
-#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
-
-# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
-#
-# You have to install the cloud-aws plugin for enabling the EC2 discovery.
-#
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
-#
-# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
-# for a step-by-step tutorial.
-
-# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
-#
-# You have to install the cloud-gce plugin for enabling the GCE discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
-
-# Azure discovery allows to use Azure API in order to perform discovery.
-#
-# You have to install the cloud-azure plugin for enabling the Azure discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
-
-################################## Slow Log ##################################
-
-# Shard level query and fetch threshold logging.
-
-#index.search.slowlog.threshold.query.warn: 10s
-#index.search.slowlog.threshold.query.info: 5s
-#index.search.slowlog.threshold.query.debug: 2s
-#index.search.slowlog.threshold.query.trace: 500ms
-
-#index.search.slowlog.threshold.fetch.warn: 1s
-#index.search.slowlog.threshold.fetch.info: 800ms
-#index.search.slowlog.threshold.fetch.debug: 500ms
-#index.search.slowlog.threshold.fetch.trace: 200ms
-
-#index.indexing.slowlog.threshold.index.warn: 10s
-#index.indexing.slowlog.threshold.index.info: 5s
-#index.indexing.slowlog.threshold.index.debug: 2s
-#index.indexing.slowlog.threshold.index.trace: 500ms
-
-################################## GC Logging ################################
-
-#monitor.jvm.gc.young.warn: 1000ms
-#monitor.jvm.gc.young.info: 700ms
-#monitor.jvm.gc.young.debug: 400ms
-
-#monitor.jvm.gc.old.warn: 10s
-#monitor.jvm.gc.old.info: 5s
-#monitor.jvm.gc.old.debug: 2s
-
-################################## Security ################################
-
-# Uncomment if you want to enable JSONP as a valid return transport on the
-# http server. With this enabled, it may pose a security risk, so disabling
-# it unless you need it is recommended (it is disabled by default).
-#
-#http.jsonp.enable: true
-http.cors.allow-origin: "*"
-http.cors.enabled: true
diff --git a/dashboard/files/templates/testing_old.json b/dashboard/files/templates/testing_old.json
new file mode 100644
index 0000000..95c1e9e
--- /dev/null
+++ b/dashboard/files/templates/testing_old.json
@@ -0,0 +1,166 @@
+{
+ "template":"testing-old",
+ "order":1,
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0,
+ "index.refresh_interval":"5s"
+ },
+ "testing":{
+ "properties":{
+ "@timestamp":{
+ "type":"date",
+ "format":"dateOptionalTime"
+ },
+ "@version":{
+ "type":"string"
+ },
+ "apiVersion":{
+ "type":"string"
+ },
+ "client":{
+ "type":"string"
+ },
+ "component":{
+ "properties":{
+ "name":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "version":{
+ "type":"string"
+ }
+ }
+ },
+ "host":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "impLanguage":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "major_ver":{
+ "type":"integer"
+ },
+ "meta":{
+ "properties":{
+ "expanded":{
+ "type":"boolean"
+ },
+ "UIOjectId":{
+ "type":"string"
+ },
+ "UIOjectIds":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "UIOjectType":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "UIContainerId":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "contextId":{
+ "type":"string"
+ },
+ "duration":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "endDate":{
+ "type":"string"
+ },
+ "entityCount":{
+ "type":"integer"
+ },
+ "fileId":{
+ "type":"string"
+ },
+ "fromDragDropEvent":{
+ "type":"boolean"
+ },
+ "numBuckets":{
+ "type":"integer"
+ },
+ "page":{
+ "type":"integer"
+ },
+ "requestedFromColumn":{
+ "type":"boolean"
+ },
+ "searchControlId":{
+ "type":"string"
+ },
+ "sessionID":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "showDetails":{
+ "type":"boolean"
+ },
+ "startDate":{
+ "type":"string"
+ },
+ "totalColumns":{
+ "type":"integer"
+ },
+ "xfld":{
+ "type":"string"
+ }
+ }
+ },
+ "minor_ver":{
+ "type":"integer"
+ },
+ "oid":{
+ "properties":{
+ "$oid":{
+ "type":"string"
+ }
+ }
+ },
+ "parms":{
+ "properties":{
+ "activity":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "desc":{
+ "type":"string"
+ },
+ "wf_state":{
+ "type":"integer"
+ },
+ "wf_version":{
+ "type":"float"
+ }
+ }
+ },
+ "path":{
+ "type":"string"
+ },
+ "sessionID":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "tags":{
+ "type":"string"
+ },
+ "timestamp":{
+ "properties":{
+ "$date":{
+ "type":"date",
+ "format":"dateOptionalTime"
+ }
+ }
+ },
+ "type":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dashboard/files/templates/testing_v2.json b/dashboard/files/templates/testing_v2.json
new file mode 100644
index 0000000..59824a7
--- /dev/null
+++ b/dashboard/files/templates/testing_v2.json
@@ -0,0 +1,90 @@
+{
+ "template" : "testing-v2",
+ "order" : 1,
+ "settings" : {
+ "number_of_shards" : 1,
+ "number_of_replicas" : 0,
+ "index.refresh_interval" : "60s"
+ },
+ "mappings" : {
+ "xdata_v2" : {
+ "properties" : {
+ "@timestamp" : {
+ "type" : "date",
+ "format" : "dateOptionalTime"
+ },
+ "@version" : {
+ "type" : "string"
+ },
+ "apiVersion" : {
+ "type" : "string"
+ },
+ "client" : {
+ "type" : "string"
+ },
+ "component" : {
+ "properties" : {
+ "name" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "version" : {
+ "type" : "string"
+ }
+ }
+ },
+ "host" : {
+ "type" : "string"
+ },
+ "impLanguage" : {
+ "type" : "string"
+ },
+ "major_ver" : {
+ "type" : "long"
+ },
+ "minor_ver" : {
+ "type" : "long"
+ },
+ "parms" : {
+ "properties" : {
+ "activity" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "desc" : {
+ "type" : "string"
+ },
+ "wf_state" : {
+ "type" : "long"
+ },
+ "wf_version" : {
+ "type" : "string"
+ }
+ }
+ },
+ "patch_ver" : {
+ "type" : "long"
+ },
+ "path" : {
+ "type" : "string"
+ },
+ "sessionID" : {
+ "type" : "string"
+ },
+ "tags" : {
+ "type" : "string"
+ },
+ "timestamp" : {
+ "type" : "date",
+ "format" : "dateOptionalTime"
+ },
+ "type" : {
+ "type" : "string"
+ },
+ "wf_state_longname" : {
+ "type" : "string"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dashboard/files/templates/testing_v3.json b/dashboard/files/templates/testing_v3.json
new file mode 100644
index 0000000..7c4335d
--- /dev/null
+++ b/dashboard/files/templates/testing_v3.json
@@ -0,0 +1,90 @@
+{
+ "template" : "testing-v3",
+ "order" : 1,
+ "settings" : {
+ "number_of_shards" : 1,
+ "number_of_replicas" : 0,
+ "index.refresh_interval" : "60s"
+ },
+ "mappings" : {
+ "xdata_v3" : {
+ "properties" : {
+ "@timestamp" : {
+ "type" : "date",
+ "format" : "dateOptionalTime"
+ },
+ "@version" : {
+ "type" : "string"
+ },
+ "action" : {
+ "type" : "string"
+ },
+ "activity" : {
+ "type" : "string"
+ },
+ "client" : {
+ "type" : "string"
+ },
+ "elementGroup" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "elementId" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "elementSub" : {
+ "type" : "string"
+ },
+ "elementType" : {
+ "type" : "string"
+ },
+ "host" : {
+ "type" : "string"
+ },
+ "language" : {
+ "type" : "string"
+ },
+ "major_ver" : {
+ "type" : "long"
+ },
+ "meta" : {
+ "type" : "object"
+ },
+ "minor_ver" : {
+ "type" : "long"
+ },
+ "patch_ver" : {
+ "type" : "long"
+ },
+ "path" : {
+ "type" : "string"
+ },
+ "sessionID" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "source" : {
+ "type" : "string"
+ },
+ "tags" : {
+ "type" : "string"
+ },
+ "timestamp" : {
+ "type" : "date",
+ "format" : "dateOptionalTime"
+ },
+ "toolName" : {
+ "index" : "not_analyzed",
+ "type" : "string"
+ },
+ "toolVersion" : {
+ "type" : "string"
+ },
+ "useraleVersion" : {
+ "type" : "string"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dashboard/files/twisted_app.py b/dashboard/files/twisted_app.py
index 53ae76d..ffdc6d9 100644
--- a/dashboard/files/twisted_app.py
+++ b/dashboard/files/twisted_app.py
@@ -28,8 +28,7 @@
import simplejson
-KIBANA = '/home/vagrant/kibana-3.1.2'
-ALLOW_ORIGIN = 'http://192.168.1.10'
+ALLOW_ORIGIN = 'http://192.16.1.10'
if not os.path.exists('/var/log/xdata'):
os.makedirs('/var/log/xdata')
@@ -101,8 +100,6 @@
loggerv3 = logging.getLogger('xdata-v3')
logger_err = logging.getLogger('error')
-kibana = File(KIBANA)
-
wf_dict = {
0: "WF_OTHER",
1: "WF_DEFINE",
@@ -145,7 +142,6 @@
return ''
root = Resource()
-root.putChild("kibana", kibana)
root.putChild("send_log", Logger())
# create a resource to serve static files
@@ -153,4 +149,4 @@
application = service.Application("User-ALE")
# attach the service to its parent application
-tmp_service.setServiceParent(application)
\ No newline at end of file
+tmp_service.setServiceParent(application)
diff --git a/dashboard/scripts/backup.sh b/dashboard/scripts/backup.sh
new file mode 100644
index 0000000..c33fe0e
--- /dev/null
+++ b/dashboard/scripts/backup.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright 2014 The Charles Stark Draper Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+INDEXNAME="xdata_v3,xdata_v2"
+BACKUPDIR="/mnt/es-backups/"
+
+sudo mkdir -p $BACKUPDIR || exit $?
+
+# Create the repository
+curl -XPUT 'http://localhost:9200/_snapshot/xdata_backup' -d "{ \"type\" : \"fs\",
+  \"settings\" : {
+    \"location\" : \"$BACKUPDIR\",
+    \"max_snapshot_bytes_per_sec\" : \"50mb\",
+    \"max_restore_bytes_per_sec\" : \"50mb\"
+  }
+  }"
+
+
+# Backup only relevant indices. We specify this as to prevent backing up the .kibana index and any other test indexes that were created
+curl -XPUT 'http://localhost:9200/_snapshot/xdata_backup/snapshot' -d "{ \"indices\" : \"$INDEXNAME\" }"
diff --git a/dashboard/scripts/install.sh b/dashboard/scripts/install.sh
index 82569e8..fb773eb 100644
--- a/dashboard/scripts/install.sh
+++ b/dashboard/scripts/install.sh
@@ -15,38 +15,66 @@
#
#!/bin/bash
-MINICONDA_SCRIPT="http://repo.continuum.io/miniconda/Miniconda-3.7.0-Linux-x86_64.sh"
-ELASTIC_DPKG_SRC="https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.2.deb"
-LOGSTASH_DPKG_SRC="https://download.elasticsearch.org/logstash/logstash/packages/debian/logstash_1.4.2-1-2c0f5a1_all.deb"
-KIBANA_SRC="https://download.elasticsearch.org/kibana/kibana/kibana-3.1.2.tar.gz"
+# Latest and greatest source packages
+MINICONDA_SCRIPT="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh"
+
+# Update box & install openjdk and mongodb
sudo -E apt-get update || exit $?
sudo -E apt-get -y install openjdk-7-jdk || exit $?
-wget -q $MINICONDA_SCRIPT || exit $?
-chmod +x ./Miniconda-3.7.0-Linux-x86_64.sh || exit $?
-./Miniconda-3.7.0-Linux-x86_64.sh -b || exit $?
+sudo -E apt-get -y install mongodb || exit $?
-echo export PATH="$HOME/miniconda/bin:$PATH" >> $HOME/.bashrc
+# Install Miniconda
+wget -q $MINICONDA_SCRIPT || exit $?
+chmod +x ./Miniconda-*.sh || exit $?
+./Miniconda-*.sh -b || exit $?
+echo export PATH="$HOME/miniconda2/bin:$PATH" >> $HOME/.bashrc
source $HOME/.bashrc
-$HOME/miniconda/bin/conda update --yes conda || exit $?
+$HOME/miniconda2/bin/conda update --yes conda || exit $?
-wget -q $ELASTIC_DPKG_SRC $LOGSTASH_DPKG_SRC || exit $?
-sudo dpkg -i elasticsearch-1.4.2.deb || exit $?
-sudo dpkg -i logstash_1.4.2-1-2c0f5a1_all.deb || exit $?
+# Install Elasticsearch
+wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
+echo "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-2.x.list
+sudo -E apt-get update || exit $?
+sudo -E apt-get -y install elasticsearch || exit $?
-# Download and install Kibana to the vagrant box. This involves downloading
-# Kibana 3.1.2, extracting the contents of the tar ball, and copying the
-# kibanan files to /etc/elasticsearch and /etc/logstash
-wget -q $KIBANA_SRC || exit $?
-tar -xvf kibana-3.1.2.tar.gz || exit $?
-sudo cp /vagrant/files/elasticsearch.yml /etc/elasticsearch/ || exit $?
-sudo cp /vagrant/files/xdata.conf /etc/logstash/conf.d/ || exit $?
-sudo cp /vagrant/files/twisted_app.py $HOME/ || exit $?
+# Install Elastic HQ Plugin
+sudo /usr/share/elasticsearch/bin/plugin install royrusso/elasticsearch-HQ || exit $?
+
+# Install Logstash
+echo "deb http://packages.elastic.co/logstash/2.2/debian stable main" | sudo tee -a /etc/apt/sources.list.d/logstash-2.2.x.list
+sudo -E apt-get update || exit $?
+sudo -E apt-get -y install logstash || exit $?
+
+# Install Kibana
+echo "deb http://packages.elastic.co/kibana/4.4/debian stable main" | sudo tee -a /etc/apt/sources.list.d/kibana-4.4.x.list
+sudo -E apt-get update || exit $?
+sudo -E apt-get -y install kibana || exit $?
+
+# Copy over configuration files
+sudo cp /vagrant/files/config/elasticsearch.yml /etc/elasticsearch/ || exit $?
+sudo cp /vagrant/files/config/xdata.conf /etc/logstash/conf.d/ || exit $?
+sudo cp /vagrant/files/twisted_app.py $HOME/ || exit $?
+sudo cp /vagrant/files/config/kibana.yml /opt/kibana/config/ || exit $?
# Restart all the services to ensure the configurations are being used properly
# and Run the kibana twisted web server so the developer has access to the
# dashboad provided by Kibana.
-sudo mkdir /var/log/xdata || exit $?
-sudo touch /var/log/xdata/xdata.log || exit $?
+sudo mkdir /var/log/xdata || exit $?
+sudo touch /var/log/xdata/xdata.log || exit $?
-cp /vagrant/files/XDATA-Dashboard-v3.json $HOME/kibana-3.1.2/app/dashboards/default.json || exit $?
+# This may need to be rewritten
+# Simply create .kibana index and add dashboard there?
+#cp /vagrant/files/data/XDATA-Dashboard-v3.json $HOME/$KIBANA/app/dashboards/default.json || exit $?
+
+# Register cron job to execute backup.sh every 6 hours
+# ┌───────────── min (0 - 59)
+# │ ┌────────────── hour (0 - 23)
+# │ │ ┌─────────────── day of month (1 - 31)
+# │ │ │ ┌──────────────── month (1 - 12)
+# │ │ │ │ ┌───────────────── day of week (0 - 6) (0 to 6 are Sunday to Saturday, or use names; 7 is Sunday, the same as 0)
+# │ │ │ │ │
+# │ │ │ │ │
+# * * * * * command to execute
+sudo chmod +x /vagrant/files/scripts/backup.sh || exit $?
+sudo crontab -l | { cat; echo "0 */6 * * * /vagrant/files/scripts/backup.sh > /dev/null 2>&1"; } | sudo crontab - || exit $?
diff --git a/dashboard/scripts/restart.sh b/dashboard/scripts/restart.sh
index b615d31..8277f12 100644
--- a/dashboard/scripts/restart.sh
+++ b/dashboard/scripts/restart.sh
@@ -14,6 +14,8 @@
# limitations under the License.
#
+#!/bin/bash
+
sudo service elasticsearch restart
# For Logstash and ElasticSearch, it takes a while before the
@@ -29,110 +31,180 @@
sleep 1;
done
-service logstash stop
+sudo service logstash stop
+
+# Delete XData indexes
curl -XDELETE 'http://localhost:9200/xdata_v3/'
curl -XDELETE 'http://localhost:9200/xdata_v2/'
curl -XDELETE 'http://localhost:9200/xdata_old/'
-curl -XPUT 'http://127.0.0.1:9200/xdata_old/'
+# Create XData indexes
+curl -XPUT 'http://localhost:9200/xdata_v3/'
+curl -XPUT 'http://localhost:9200/xdata_v2/'
+curl -XPUT 'http://localhost:9200/xdata_old/'
+
curl -XPUT 'http://localhost:9200/xdata_old/testing/_mapping' -d '
{
-
- "testing" : {
- "properties" : {
- "@timestamp" : {
- "type" : "date",
- "format" : "dateOptionalTime"
- },
- "@version" : {
- "type" : "string"
- },
- "apiVersion" : {
- "type" : "string"
- },
- "client" : {
- "type" : "string"
- },
- "component" : {
- "properties" : {
- "name" : {
- "index" : "not_analyzed",
- "type" : "string"
- },
- "version" : {
- "type" : "string"
- }
+ "testing":{
+ "properties":{
+ "@timestamp":{
+ "type":"date",
+ "format":"date_optional_time"
+ },
+ "@version":{
+ "type":"string"
+ },
+ "apiVersion":{
+ "type":"string"
+ },
+ "client":{
+ "type":"string"
+ },
+ "component":{
+ "properties":{
+ "name":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "version":{
+ "type":"string"
+ }
}
- },
- "host" : {
- "type" : "string"
- },
- "impLanguage" : {
- "type" : "string"
- },
- "major_ver" : {
- "type" : "long"
- },
- "meta" : {
- "type" : "long"
- },
- "minor_ver" : {
- "type" : "long"
- },
- "oid" : {
- "properties" : {
- "$oid" : {
- "type" : "string"
- }
+ },
+ "host":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "impLanguage":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "major_ver":{
+ "type":"integer"
+ },
+ "meta":{
+ "properties":{
+ "expanded":{
+ "type":"boolean"
+ },
+ "UIOjectId":{
+ "type":"string"
+ },
+ "UIOjectIds":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "UIOjectType":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "UIContainerId":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "contextId":{
+ "type":"string"
+ },
+ "duration":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "endDate":{
+ "type":"string"
+ },
+ "entityCount":{
+ "type":"integer"
+ },
+ "fileId":{
+ "type":"string"
+ },
+ "fromDragDropEvent":{
+ "type":"boolean"
+ },
+ "numBuckets":{
+ "type":"integer"
+ },
+ "page":{
+ "type":"integer"
+ },
+ "requestedFromColumn":{
+ "type":"boolean"
+ },
+ "searchControlId":{
+ "type":"string"
+ },
+ "sessionID":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "showDetails":{
+ "type":"boolean"
+ },
+ "startDate":{
+ "type":"string"
+ },
+ "totalColumns":{
+ "type":"integer"
+ },
+ "xfld":{
+ "type":"string"
+ }
}
- },
- "parms" : {
- "properties" : {
- "activity" : {
- "index" : "not_analyzed",
- "type" : "string"
- },
- "desc" : {
- "type" : "string"
- },
- "wf_state" : {
- "type" : "string"
- },
- "wf_version" : {
- "type" : "string"
- }
+ },
+ "minor_ver":{
+ "type":"integer"
+ },
+ "oid":{
+ "properties":{
+ "$oid":{
+ "type":"string"
+ }
}
- },
- "patch_ver" : {
- "type" : "long"
- },
- "path" : {
- "type" : "string"
- },
- "sessionID" : {
- "type" : "string"
- },
- "tags" : {
- "type" : "string"
- },
- "timestamp" : {
- "properties" : {
- "$date" : {
- "type" : "date",
- "format" : "dateOptionalTime"
- }
+ },
+ "parms":{
+ "properties":{
+ "activity":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "desc":{
+ "type":"string"
+ },
+ "wf_state":{
+ "type":"string"
+ },
+ "wf_version":{
+ "type":"float"
+ }
}
- },
- "type" : {
- "type" : "string"
- }
- }
+ },
+ "path":{
+ "type":"string"
+ },
+ "sessionID":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "tags":{
+ "type":"string"
+ },
+ "timestamp":{
+ "properties":{
+ "$date":{
+ "type":"date",
+ "format":"dateOptionalTime"
+ }
+ }
+ },
+ "type":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
}
-
+ }
}
'
-curl -XPUT 'http://127.0.0.1:9200/xdata_v3/'
curl -XPUT 'http://localhost:9200/xdata_v3/testing/_mapping' -d '
{
@@ -218,9 +290,6 @@
}
'
-
-
-curl -XPUT 'http://127.0.0.1:9200/xdata_v2/'
curl -XPUT 'http://localhost:9200/xdata_v2/testing/_mapping' -d '
{
@@ -307,14 +376,24 @@
}
'
-rm /var/lib/logstash/.sincedb_*
-service logstash start
+# Remove .sincedb file to trigger logstash to reindex all xdata_* data
+sudo rm /var/lib/logstash/.sincedb_*
-PIDFILE=$HOME/twistd.pid
+# copy old xdata logs to elasticsearch
+sudo cp /vagrant/files/data/xdata-old.log /var/log/xdata/xdata-old.log
+
+sudo service logstash start
+
+# Start Kibana
+sudo service kibana start
+
+PIDFILE=/home/vagrant/twistd.pid
if [ -f $PIDFILE ]; then
echo 'Twisted Running, Killing it!'
- sudo -E kill `cat $PIDFILE`
+ sudo -E kill `sudo cat $PIDFILE`
fi
-sudo -E twistd --pidfile=$PIDFILE -y twisted_app.py
\ No newline at end of file
+# Run the twisted web server so the developer has access to the
+# dashboard provided by Kibana.
+sudo -E twistd --pidfile=$PIDFILE -y /home/vagrant/twisted_app.py
diff --git a/dashboard/scripts/restore.sh b/dashboard/scripts/restore.sh
new file mode 100644
index 0000000..d7afdee
--- /dev/null
+++ b/dashboard/scripts/restore.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014 The Charles Stark Draper Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Restore snapshot (the restore API is a POST to the snapshot's _restore endpoint)
+curl -XPOST 'http://localhost:9200/_snapshot/xdata_backup/snapshot/_restore'
\ No newline at end of file