# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| |
###############################
# configuration for all scripts
###############################
# Fluo installation directory. The default is a placeholder the user must
# edit; it points at a Fluo install (not Accumulo -- FLUO_CONN below is
# resolved relative to it).
test -z "$FLUO_HOME" && FLUO_HOME=/path/to/fluo
# Fluo connection properties file (quoted in case FLUO_HOME contains spaces)
FLUO_CONN="$FLUO_HOME/conf/fluo-conn.properties"
# Fluo application name
FLUO_APP_NAME=stresso
# Set this to avoid Hadoop's old version of guava. This will make Hadoop's
# yarn command use a classloader when running code. This classloader isolates
# stresso runtime code from Hadoop's dependencies.
export HADOOP_USE_CLIENT_CLASSLOADER=true
| |
###############################
# configuration for run-test.sh
###############################
# Directory that collects the logs produced by the test run.
LOG_DIR="${BIN_DIR}/../logs"
# Upper bound on the random numbers generated for the test.
MAX=$((10 ** 9))
# How many splits to create in the table.
SPLITS=17
# Mapper count for data generation; this fixes the number of files the
# generation step writes, and that file count in turn fixes how many
# mappers the load step runs.
MAPS=17
# Reducer count.
REDUCES=17
# How many random numbers the initial generation step produces.
GEN_INIT=$((10 ** 6))
# How many random numbers each incremental step produces.
GEN_INCR=$((10 ** 3))
# How many incremental steps to run.
ITERATIONS=3
# Pause, in seconds, between incremental steps.
SLEEP=30
# After loads, compact any level that can hold fewer than this many nodes.
COMPACT_CUTOFF=$((256 ** 3 + 1))
# The fluo wait command is executed after every WAIT_PERIOD incremental
# load steps.
WAIT_PERIOD=10
# To run map reduce jobs, a shaded jar is built. The following properties
# determine what versions of Fluo and Accumulo client libs end up in the
# shaded jar. Both commands run at the time this file is sourced. The fluo
# path is quoted so a FLUO_HOME containing spaces does not break the call;
# 'accumulo' is assumed to be on PATH.
FLUO_VERSION=$("$FLUO_HOME"/bin/fluo version)
ACCUMULO_VERSION=$(accumulo version)
| |
# The following Accumulo table properties will be set.
# The heredoc delimiter is quoted because the properties are literal text --
# no parameter expansion is wanted. 'read -d ""' always returns non-zero
# when it hits EOF, so guard with '|| true' in case a sourcing script runs
# under 'set -e'.
read -r -d '' TABLE_PROPS <<'EOM' || true
table.compaction.major.ratio=1.5
table.file.compress.blocksize=8K
table.file.compress.blocksize.index=32K
table.file.compress.type=snappy
EOM