blob: 20d22bb075f15571fb17c00850620b648368da07 [file] [log] [blame]
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<title>Performance Tools · ActiveMQ Artemis Documentation</title>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="description" content="">
<meta name="generator" content="GitBook 3.2.3">
<link rel="stylesheet" href="gitbook/style.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-highlight/website.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-search/search.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-fontsettings/website.css">
<meta name="HandheldFriendly" content="true"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<link rel="apple-touch-icon-precomposed" sizes="152x152" href="gitbook/images/apple-touch-icon-precomposed-152.png">
<link rel="shortcut icon" href="gitbook/images/favicon.ico" type="image/x-icon">
<link rel="next" href="configuration-index.html" />
<link rel="prev" href="perf-tuning.html" />
</head>
<body>
<div class="book">
<div class="book-summary">
<div id="book-search-input" role="search">
<input type="text" placeholder="Type to search" />
</div>
<nav role="navigation">
<ul class="summary">
<li class="chapter " data-level="1.1" data-path="./">
<a href="./">
Introduction
</a>
</li>
<li class="chapter " data-level="1.2" data-path="notice.html">
<a href="notice.html">
Legal Notice
</a>
</li>
<li class="chapter " data-level="1.3" data-path="preface.html">
<a href="preface.html">
Preface
</a>
</li>
<li class="chapter " data-level="1.4" data-path="project-info.html">
<a href="project-info.html">
Project Info
</a>
</li>
<li class="chapter " data-level="1.5" data-path="versions.html">
<a href="versions.html">
Versions
</a>
</li>
<li class="chapter " data-level="1.6" data-path="messaging-concepts.html">
<a href="messaging-concepts.html">
Messaging Concepts
</a>
</li>
<li class="chapter " data-level="1.7" data-path="architecture.html">
<a href="architecture.html">
Architecture
</a>
</li>
<li class="chapter " data-level="1.8" data-path="using-server.html">
<a href="using-server.html">
Using the Server
</a>
</li>
<li class="chapter " data-level="1.9" data-path="upgrading.html">
<a href="upgrading.html">
Upgrading
</a>
</li>
<li class="chapter " data-level="1.10" >
<span>
Address
</span>
<ul class="articles">
<li class="chapter " data-level="1.10.1" data-path="address-model.html">
<a href="address-model.html">
Model
</a>
</li>
<li class="chapter " data-level="1.10.2" data-path="address-settings.html">
<a href="address-settings.html">
Settings
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.11" data-path="protocols-interoperability.html">
<a href="protocols-interoperability.html">
Protocols and Interoperability
</a>
</li>
<li class="chapter " data-level="1.12" data-path="amqp.html">
<a href="amqp.html">
AMQP
</a>
<ul class="articles">
<li class="chapter " data-level="1.12.1" data-path="amqp-broker-connections.html">
<a href="amqp-broker-connections.html">
Broker Connections
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.13" data-path="mqtt.html">
<a href="mqtt.html">
MQTT
</a>
</li>
<li class="chapter " data-level="1.14" data-path="stomp.html">
<a href="stomp.html">
STOMP
</a>
</li>
<li class="chapter " data-level="1.15" data-path="openwire.html">
<a href="openwire.html">
OpenWire
</a>
</li>
<li class="chapter " data-level="1.16" data-path="core.html">
<a href="core.html">
Core
</a>
</li>
<li class="chapter " data-level="1.17" data-path="jms-core-mapping.html">
<a href="jms-core-mapping.html">
Mapping JMS Concepts to the Core API
</a>
</li>
<li class="chapter " data-level="1.18" data-path="using-jms.html">
<a href="using-jms.html">
Using JMS
</a>
</li>
<li class="chapter " data-level="1.19" data-path="client-classpath.html">
<a href="client-classpath.html">
The Client Classpath
</a>
<ul class="articles">
<li class="chapter " data-level="1.19.1" data-path="client-classpath-jms.html">
<a href="client-classpath-jms.html">
JMS
</a>
</li>
<li class="chapter " data-level="1.19.2" data-path="client-classpath-jakarta.html">
<a href="client-classpath-jakarta.html">
Jakarta
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.20" data-path="examples.html">
<a href="examples.html">
Examples
</a>
</li>
<li class="chapter " data-level="1.21" data-path="wildcard-routing.html">
<a href="wildcard-routing.html">
Routing Messages With Wild Cards
</a>
</li>
<li class="chapter " data-level="1.22" data-path="wildcard-syntax.html">
<a href="wildcard-syntax.html">
Wildcard Syntax
</a>
</li>
<li class="chapter " data-level="1.23" data-path="filter-expressions.html">
<a href="filter-expressions.html">
Filter Expressions
</a>
</li>
<li class="chapter " data-level="1.24" data-path="persistence.html">
<a href="persistence.html">
Persistence
</a>
</li>
<li class="chapter " data-level="1.25" data-path="configuring-transports.html">
<a href="configuring-transports.html">
Configuring Transports
</a>
</li>
<li class="chapter " data-level="1.26" data-path="config-reload.html">
<a href="config-reload.html">
Configuration Reload
</a>
</li>
<li class="chapter " data-level="1.27" data-path="connection-ttl.html">
<a href="connection-ttl.html">
Detecting Dead Connections
</a>
</li>
<li class="chapter " data-level="1.28" data-path="slow-consumers.html">
<a href="slow-consumers.html">
Detecting Slow Consumers
</a>
</li>
<li class="chapter " data-level="1.29" data-path="network-isolation.html">
<a href="network-isolation.html">
Avoiding Network Isolation
</a>
</li>
<li class="chapter " data-level="1.30" data-path="critical-analysis.html">
<a href="critical-analysis.html">
Detecting Broker Issues (Critical Analysis)
</a>
</li>
<li class="chapter " data-level="1.31" data-path="transaction-config.html">
<a href="transaction-config.html">
Resource Manager Configuration
</a>
</li>
<li class="chapter " data-level="1.32" data-path="flow-control.html">
<a href="flow-control.html">
Flow Control
</a>
</li>
<li class="chapter " data-level="1.33" data-path="send-guarantees.html">
<a href="send-guarantees.html">
Guarantees of sends and commits
</a>
</li>
<li class="chapter " data-level="1.34" data-path="undelivered-messages.html">
<a href="undelivered-messages.html">
Message Redelivery and Undelivered Messages
</a>
</li>
<li class="chapter " data-level="1.35" data-path="message-expiry.html">
<a href="message-expiry.html">
Message Expiry
</a>
</li>
<li class="chapter " data-level="1.36" data-path="large-messages.html">
<a href="large-messages.html">
Large Messages
</a>
</li>
<li class="chapter " data-level="1.37" data-path="paging.html">
<a href="paging.html">
Paging
</a>
</li>
<li class="chapter " data-level="1.38" data-path="scheduled-messages.html">
<a href="scheduled-messages.html">
Scheduled Messages
</a>
</li>
<li class="chapter " data-level="1.39" data-path="last-value-queues.html">
<a href="last-value-queues.html">
Last-Value Queues
</a>
</li>
<li class="chapter " data-level="1.40" data-path="non-destructive-queues.html">
<a href="non-destructive-queues.html">
Non-Destructive Queues
</a>
</li>
<li class="chapter " data-level="1.41" data-path="ring-queues.html">
<a href="ring-queues.html">
Ring Queues
</a>
</li>
<li class="chapter " data-level="1.42" data-path="retroactive-addresses.html">
<a href="retroactive-addresses.html">
Retroactive Addresses
</a>
</li>
<li class="chapter " data-level="1.43" data-path="exclusive-queues.html">
<a href="exclusive-queues.html">
Exclusive Queues
</a>
</li>
<li class="chapter " data-level="1.44" data-path="message-grouping.html">
<a href="message-grouping.html">
Message Grouping
</a>
</li>
<li class="chapter " data-level="1.45" data-path="consumer-priority.html">
<a href="consumer-priority.html">
Consumer Priority
</a>
</li>
<li class="chapter " data-level="1.46" data-path="pre-acknowledge.html">
<a href="pre-acknowledge.html">
Extra Acknowledge Modes
</a>
</li>
<li class="chapter " data-level="1.47" data-path="management.html">
<a href="management.html">
Management
</a>
</li>
<li class="chapter " data-level="1.48" data-path="management-console.html">
<a href="management-console.html">
Management Console
</a>
</li>
<li class="chapter " data-level="1.49" data-path="metrics.html">
<a href="metrics.html">
Metrics
</a>
</li>
<li class="chapter " data-level="1.50" data-path="security.html">
<a href="security.html">
Security
</a>
</li>
<li class="chapter " data-level="1.51" data-path="masking-passwords.html">
<a href="masking-passwords.html">
Masking Passwords
</a>
</li>
<li class="chapter " data-level="1.52" data-path="broker-plugins.html">
<a href="broker-plugins.html">
Broker Plugins
</a>
</li>
<li class="chapter " data-level="1.53" data-path="resource-limits.html">
<a href="resource-limits.html">
Resource Limits
</a>
</li>
<li class="chapter " data-level="1.54" data-path="jms-bridge.html">
<a href="jms-bridge.html">
The JMS Bridge
</a>
</li>
<li class="chapter " data-level="1.55" data-path="client-reconnection.html">
<a href="client-reconnection.html">
Client Reconnection and Session Reattachment
</a>
</li>
<li class="chapter " data-level="1.56" data-path="diverts.html">
<a href="diverts.html">
Diverting and Splitting Message Flows
</a>
</li>
<li class="chapter " data-level="1.57" data-path="core-bridges.html">
<a href="core-bridges.html">
Core Bridges
</a>
</li>
<li class="chapter " data-level="1.58" data-path="transformers.html">
<a href="transformers.html">
Transformers
</a>
</li>
<li class="chapter " data-level="1.59" data-path="duplicate-detection.html">
<a href="duplicate-detection.html">
Duplicate Message Detection
</a>
</li>
<li class="chapter " data-level="1.60" data-path="clusters.html">
<a href="clusters.html">
Clusters
</a>
</li>
<li class="chapter " data-level="1.61" data-path="federation.html">
<a href="federation.html">
Federation
</a>
<ul class="articles">
<li class="chapter " data-level="1.61.1" data-path="federation-address.html">
<a href="federation-address.html">
Address Federation
</a>
</li>
<li class="chapter " data-level="1.61.2" data-path="federation-queue.html">
<a href="federation-queue.html">
Queue Federation
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.62" data-path="ha.html">
<a href="ha.html">
High Availability and Failover
</a>
</li>
<li class="chapter " data-level="1.63" data-path="connection-routers.html">
<a href="connection-routers.html">
Connection Routers
</a>
</li>
<li class="chapter " data-level="1.64" data-path="graceful-shutdown.html">
<a href="graceful-shutdown.html">
Graceful Server Shutdown
</a>
</li>
<li class="chapter " data-level="1.65" data-path="libaio.html">
<a href="libaio.html">
Libaio Native Libraries
</a>
</li>
<li class="chapter " data-level="1.66" data-path="thread-pooling.html">
<a href="thread-pooling.html">
Thread management
</a>
</li>
<li class="chapter " data-level="1.67" data-path="web-server.html">
<a href="web-server.html">
Embedded Web Server
</a>
</li>
<li class="chapter " data-level="1.68" data-path="logging.html">
<a href="logging.html">
Logging
</a>
</li>
<li class="chapter " data-level="1.69" data-path="rest.html">
<a href="rest.html">
REST Interface
</a>
</li>
<li class="chapter " data-level="1.70" data-path="embedding-activemq.html">
<a href="embedding-activemq.html">
Embedding the Broker
</a>
</li>
<li class="chapter " data-level="1.71" data-path="karaf.html">
<a href="karaf.html">
Apache Karaf
</a>
</li>
<li class="chapter " data-level="1.72" data-path="tomcat.html">
<a href="tomcat.html">
Apache Tomcat
</a>
</li>
<li class="chapter " data-level="1.73" data-path="spring-integration.html">
<a href="spring-integration.html">
Spring Integration
</a>
</li>
<li class="chapter " data-level="1.74" data-path="cdi-integration.html">
<a href="cdi-integration.html">
CDI Integration
</a>
</li>
<li class="chapter " data-level="1.75" data-path="intercepting-operations.html">
<a href="intercepting-operations.html">
Intercepting Operations
</a>
</li>
<li class="chapter " data-level="1.76" data-path="data-tools.html">
<a href="data-tools.html">
Data Tools
</a>
</li>
<li class="chapter " data-level="1.77" data-path="activation-tools.html">
<a href="activation-tools.html">
Activation Tools
</a>
</li>
<li class="chapter " data-level="1.78" data-path="maven-plugin.html">
<a href="maven-plugin.html">
Maven Plugin
</a>
</li>
<li class="chapter " data-level="1.79" data-path="unit-testing.html">
<a href="unit-testing.html">
Unit Testing
</a>
</li>
<li class="chapter " data-level="1.80" data-path="perf-tuning.html">
<a href="perf-tuning.html">
Troubleshooting and Performance Tuning
</a>
</li>
<li class="chapter active" data-level="1.81" data-path="perf-tools.html">
<a href="perf-tools.html">
Performance Tools
</a>
</li>
<li class="chapter " data-level="1.82" data-path="configuration-index.html">
<a href="configuration-index.html">
Configuration Reference
</a>
</li>
<li class="chapter " data-level="1.83" data-path="restart-sequence.html">
<a href="restart-sequence.html">
Restart Sequence
</a>
</li>
<li class="divider"></li>
<li>
<a href="https://www.gitbook.com" target="_blank" rel="noopener noreferrer" class="gitbook-link">
Published with GitBook
</a>
</li>
</ul>
</nav>
</div>
<div class="book-body">
<div class="body-inner">
<div class="book-header" role="navigation">
<!-- Title -->
<h1>
<i class="fa fa-circle-o-notch fa-spin"></i>
<a href="." >Performance Tools</a>
</h1>
</div>
<div class="page-wrapper" tabindex="-1" role="main">
<div class="page-inner">
<div id="book-search-results">
<div class="search-noresults">
<section class="normal markdown-section">
<h1 id="artemis-perf-commands">Artemis <code>perf</code> commands</h1>
<p>Artemis provides some built-in performance test tools based on the <a href="https://javaee.github.io/jms-spec/pages/JMS20FinalRelease" target="_blank">JMS 2 API</a>
to help users (and developers) to stress test a configured Artemis broker instance in different scenarios.</p>
<p>These command-line tools won&apos;t represent a full-fat benchmark (such as <a href="https://openmessaging.cloud/docs/benchmarks/" target="_blank">Open Messaging</a>),
but can be used as building blocks to produce one. They are also quite useful on their own.</p>
<p>In summary, the provided <code>perf</code> tools are:</p>
<ol>
<li><code>producer</code> tool: it can generate both all-out throughput or target-rate load, using <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/bytesmessage" target="_blank">BytesMessage</a> of a configured size</li>
<li><code>consumer</code> tool: it uses a <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/messagelistener" target="_blank">MessageListener</a> to consume messages sent by the <code>producer</code> command</li>
<li><code>client</code> tool: it packs both tools as a single command</li>
</ol>
<p>Most users will just need the <code>client</code> tool, but the <code>producer</code> and <code>consumer</code> tools allow performing tests in additional scenario(s):</p>
<ul>
<li>delaying consumer start, in order to cause the broker to page</li>
<li>running producers and consumers on different machines </li>
<li>...</li>
</ul>
<p>The examples below (running on a <code>64 bit Linux 5.14 with Intel&#xAE; Core&#x2122; i7-9850H CPU @ 2.60GHz &#xD7; 12 with Turbo Boost disabled, 32 GB of RAM and SSD</code>)
show different use cases of increasing complexity. As they progress, some internal architectural details of the tool and the configuration options supported, are explored.</p>
<blockquote>
<p><strong>Note:</strong><br>The tools can run both from within the broker instance&apos;s folder or
from the base artemis <code>bin</code> folder.
In the former case it will use the same JVM parameters configured on the instance (on <code>artemis.profile</code>),
while in the latter case the user should set <code>JAVA_ARGS</code> environment variable to override default heap and GC parameters</p>
<p>e.g. <code>-XX:+UseParallelGC -Xms512M -Xmx1024M</code></p>
</blockquote>
<h2 id="case-1-single-producer-single-consumer-over-a-queue">Case 1: Single producer Single consumer over a queue</h2>
<p>This is the simplest possible case: running a load test with 1 producer and 1 consumer on a non-durable queue <code>TEST_QUEUE</code>,
using <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/deliverymode#NON_PERSISTENT" target="_blank">non-persistent</a>
1024 bytes long (by default) messages, using <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/session#AUTO_ACKNOWLEDGE" target="_blank">auto-acknowledge</a>. </p>
<p>Let&apos;s see what happens after typing:</p>
<pre><code class="lang-bash">$ ./artemis perf client queue://TEST_QUEUE
Connection brokerURL = tcp://localhost:61616
2022-01-18 10:30:54,535 WARN [org.apache.activemq.artemis.core.client] AMQ212053: CompletionListener/SendAcknowledgementHandler used with confirmationWindowSize=-1. Enable confirmationWindowSize to receive acks from server!
--- warmup <span class="hljs-literal">false</span>
--- sent: 7316 msg/sec
--- blocked: 6632 msg/sec
--- completed: 7320 msg/sec
--- received: 7317 msg/sec
<span class="hljs-comment"># ...</span>
</code></pre>
<p>The test keeps on running, until <code>SIGTERM</code> or <code>SIGINT</code> signals are sent to the Java process (on Linux Console it translates into pressing <strong>CTRL + C</strong>).
Before looking what the metrics mean, there&apos;s an initial <code>WARN</code> log that shouldn&apos;t be ignored:</p>
<pre><code class="lang-bash">WARN [org.apache.activemq.artemis.core.client] AMQ212053: CompletionListener/SendAcknowledgementHandler used with confirmationWindowSize=-1. Enable confirmationWindowSize to receive acks from server!
</code></pre>
<p>It shows two things:</p>
<ol>
<li>the load generator uses <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/messageproducer#send-javax.jms.Destination-javax.jms.Message-javax.jms.CompletionListener-" target="_blank">async message producers</a></li>
<li><code>confirmationWindowSize</code> is an Artemis CORE protocol specific setting; the <code>perf</code> commands use CORE as the default JMS provider</li>
</ol>
<h3 id="live-latency-console-reporting">Live Latency Console Reporting</h3>
<p>The <code>perf client</code> command can report on Console different latency percentiles metrics by adding <code>--show-latency</code> to the command arguments, but in order to obtain meaningful metrics, we need to address <code>WARN</code> by setting <code>confirmationWindowSize</code> on the producer <code>url</code>,
setting <code>--consumer-url</code> to save applying the same configuration for consumer(s). </p>
<p>In short, the command is using these additional parameters:</p>
<pre><code class="lang-bash">--show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616
</code></pre>
<h4 id="running-it">Running it</h4>
<pre><code class="lang-bash">$ ./artemis perf client --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 queue://TEST_QUEUE
--- warmup <span class="hljs-literal">false</span>
--- sent: 8114 msg/sec
--- blocked: 8114 msg/sec
--- completed: 8114 msg/sec
--- received: 8113 msg/sec
--- send ack time: mean: 113.01 us - 50.00%: 106.00 us - 90.00%: 142.00 us - 99.00%: 204.00 us - 99.90%: 371.00 us - 99.99%: 3455.00 us - max: 3455.00 us
--- transfer time: mean: 213.71 us - 50.00%: 126.00 us - 90.00%: 177.00 us - 99.00%: 3439.00 us - 99.90%: 7967.00 us - 99.99%: 8895.00 us - max: 8895.00 us
<span class="hljs-comment"># CTRL + C pressed</span>
--- SUMMARY
--- result: success
--- total sent: 70194
--- total blocked: 70194
--- total completed: 70194
--- total received: 70194
--- aggregated send time: mean: 101.53 us - 50.00%: 86.00 us - 90.00%: 140.00 us - 99.00%: 283.00 us - 99.90%: 591.00 us - 99.99%: 2007.00 us - max: 24959.00 us
--- aggregated transfer time: mean: 127.48 us - 50.00%: 97.00 us - 90.00%: 166.00 us - 99.00%: 449.00 us - 99.90%: 4671.00 us - 99.99%: 8255.00 us - max: 27263.00 us
</code></pre>
<p>Some notes:</p>
<ol>
<li><code>WARN</code> message is now gone</li>
<li><code>send ack time</code> and <code>transfer time</code> statistics are printed at second interval</li>
<li><code>total</code> and <code>aggregated</code> metrics are printed on test completion (more on this later)</li>
</ol>
<p>The meaning of the live latency statistics are:</p>
<ul>
<li><code>send ack time</code>: percentiles of latency to acknowledge sent messages</li>
<li><code>transfer time</code>: percentiles of latency to transfer messages from producer(s) to consumer(s)</li>
</ul>
<p>The <code>perf</code> commands use <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/messageproducer#send-javax.jms.Destination-javax.jms.Message-javax.jms.CompletionListener-" target="_blank">JMS 2 async message producers</a>
that allow the load generator to accumulate in-flight sent messages and depending on the protocol implementation, may block its producer thread due to producer flow control.
e.g: the Artemis CORE protocol can block producer threads to refill producer credits, while the <a href="https://qpid.apache.org/components/jms/index.html" target="_blank">QPID-JMS</a> won&apos;t.</p>
<p>The <code>perf</code> tool is implementing its own in-flight sent requests tracking and can be configured to limit the amount of pending sent messages,
while reporting the rate by which producers are &quot;blocked&quot; awaiting completions </p>
<blockquote>
<p><strong>Producers threads are <code>blocked</code>?</strong><br>Although the load back-pressure mechanism is non-blocking, given that the load generator cannot push further load while back-pressured
by the protocol client, the load is semantically &quot;blocked&quot;.
This detail is relevant to explain the live rate <a href="#running-it">statistics</a> on Console:</p>
</blockquote>
<p>By default, the <code>perf</code> tools (i.e: <code>client</code> and <code>producer</code>) <strong>limit the number of in-flight requests to 1</strong>: to change the default setting
users should add <code>--max-pending</code> parameter configuration.</p>
<blockquote>
<p><strong>Note:</strong><br>Setting <code>--max-pending 0</code> will disable the load generator in-flight sent messages limiter, allowing the tool to accumulate
an unbounded number of in-flight messages, risking <code>OutOfMemoryError</code>.<br>This is <strong>NOT RECOMMENDED!</strong></p>
</blockquote>
<p>More detail on the metrics:</p>
<ul>
<li><code>warmup</code>: the generator phase while the statistics sample is collected; warmup duration can be set by setting <code>--warmup</code></li>
<li><code>sent</code>: the message sent rate</li>
<li><code>blocked</code>: the rate of attempts to send a new message, &quot;blocked&quot; awaiting <code>--max-pending</code> refill</li>
<li><code>completed</code>: the rate of message send acknowledgements received by producer(s)</li>
<li><code>received</code>: the rate of messages received by consumer(s)</li>
</ul>
<h3 id="how-to-read-the-live-statistics">How to read the live statistics?</h3>
<p>The huge amount of <code>blocked</code> vs <code>sent</code> means that the broker wasn&apos;t fast enough to refill the single <code>--max-pending</code> budget
before sending a new message.<br>It can be changed into:</p>
<pre><code class="lang-bash">--max-pending 100
</code></pre>
<h5 id="to-our-previous-command">to our previous command:</h5>
<pre><code class="lang-bash">$ ./artemis perf client --warmup 20 --max-pending 100 --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 queue://TEST_QUEUE
Connection brokerURL = tcp://localhost:61616?confirmationWindowSize=20000
<span class="hljs-comment"># first samples shows very BAD performance because client JVM is still warming up</span>
--- warmup <span class="hljs-literal">true</span>
--- sent: 27366 msg/sec
--- blocked: 361 msg/sec
--- completed: 27305 msg/sec
--- received: 26195 msg/sec
--- send ack time: mean: 1743.39 us - 50.00%: 1551.00 us - 90.00%: 3119.00 us - 99.00%: 5215.00 us - 99.90%: 8575.00 us - 99.99%: 8703.00 us - max: 23679.00 us
--- transfer time: mean: 11860.32 us - 50.00%: 11583.00 us - 90.00%: 18559.00 us - 99.00%: 24319.00 us - 99.90%: 31359.00 us - 99.99%: 31615.00 us - max: 31615.00 us
<span class="hljs-comment"># ... &gt; 20 seconds later ...</span>
<span class="hljs-comment"># performance is now way better than during warmup</span>
--- warmup <span class="hljs-literal">false</span>
--- sent: 86525 msg/sec
--- blocked: 5734 msg/sec
--- completed: 86525 msg/sec
--- received: 86556 msg/sec
--- send ack time: mean: 1109.13 us - 50.00%: 1103.00 us - 90.00%: 1447.00 us - 99.00%: 1687.00 us - 99.90%: 5791.00 us - 99.99%: 5983.00 us - max: 5983.00 us
--- transfer time: mean: 4662.94 us - 50.00%: 1679.00 us - 90.00%: 12159.00 us - 99.00%: 14079.00 us - 99.90%: 14527.00 us - 99.99%: 14783.00 us - max: 14783.00 us
<span class="hljs-comment"># CTRL + C</span>
--- SUMMARY
--- result: success
--- total sent: 3450389
--- total blocked: 168863
--- total completed: 3450389
--- total received: 3450389
--- aggregated send time: mean: 1056.09 us - 50.00%: 1003.00 us - 90.00%: 1423.00 us - 99.00%: 1639.00 us - 99.90%: 4287.00 us - 99.99%: 7103.00 us - max: 19583.00 us
--- aggregated transfer time: mean: 18647.51 us - 50.00%: 10751.00 us - 90.00%: 54271.00 us - 99.00%: 84991.00 us - 99.90%: 90111.00 us - 99.99%: 93183.00 us - max: 94207.00 us
</code></pre>
<p>Some notes on the results:</p>
<ul>
<li>we now have a reasonable <code>blocked/sent</code> ratio (&lt; ~10%) </li>
<li>sent rate has improved <strong>ten-fold</strong> if compared to <a href="#running-it">previous results</a></li>
</ul>
<p>And on the <code>SUMMARY</code> statistics:</p>
<ul>
<li><code>total</code> counters include measurements collected with <code>warmup true</code></li>
<li><code>aggregated</code> latencies <strong>don&apos;t</strong> include measurements collected with <code>warmup true</code></li>
</ul>
<h3 id="how-to-compare-latencies-across-tests">How to compare latencies across tests?</h3>
<p>The Console output format isn&apos;t designed for easy latency comparisons, however the
<code>perf</code> commands expose <code>--hdr &lt;hdr file name&gt;</code> parameter to produce a <a href="http://hdrhistogram.org/" target="_blank">HDR Histogram</a> compatible report that can be opened with different visualizers<br>eg <a href="https://hdrhistogram.github.io/HdrHistogramJSDemo/logparser.html" target="_blank">Online HdrHistogram Log Analyzer</a>, <a href="https://github.com/ennerf/HdrHistogramVisualizer" target="_blank">HdrHistogramVisualizer</a> or <a href="https://github.com/HdrHistogram/HistogramLogAnalyzer" target="_blank">HistogramLogAnalyzer</a>. </p>
<blockquote>
<p><strong>Note:</strong><br>Any latency collected trace on this guide is going to use <a href="https://hdrhistogram.github.io/HdrHistogramJSDemo/logparser.html" target="_blank">Online HdrHistogram Log Analyzer</a>
as HDR Histogram visualizer tool.</p>
</blockquote>
<p>Below is the visualization of the HDR histograms collected while adding to the previous benchmark</p>
<pre><code class="lang-bash">--hdr /tmp/non_durable_queue.hdr
</code></pre>
<p>Whole test execution shows tagged latencies, to distinguish <code>warmup</code> ones:</p>
<p><img src="images/test.png" alt="test"></p>
<p>Filtering out <code>warmup</code> latencies, it looks like</p>
<p><img src="images/hot_test.png" alt="hot test"></p>
<p>Latency results show that at higher percentiles <code>transfer</code> latency is way higher than the <code>sent</code> one
(reminder: <code>sent</code> is the time to acknowledge sent messages), probably meaning that some queuing-up is happening on the broker.</p>
<p>In order to test this theory we switch to <strong>target rate tests</strong>.</p>
<h2 id="case-2-target-rate-single-producer-single-consumer-over-a-queue">Case 2: Target Rate Single producer Single consumer over a queue</h2>
<p><code>perf client</code> and <code>perf producer</code> tools allow specifying a target rate to schedule producer(s) requests: adding </p>
<pre><code class="lang-bash">--rate &lt;msg/sec <span class="hljs-built_in">integer</span> value&gt;
</code></pre>
<p>The previous example <a href="#to-our-previous-command">last run</a> shows that <code>--max-pending 100</code> guarantees &lt; 10% blocked/sent messages with
aggregated latencies</p>
<pre><code class="lang-bash">--- aggregated send time: mean: 1056.09 us - 50.00%: 1003.00 us - 90.00%: 1423.00 us - 99.00%: 1639.00 us - 99.90%: 4287.00 us - 99.99%: 7103.00 us - max: 19583.00 us
--- aggregated transfer time: mean: 18647.51 us - 50.00%: 10751.00 us - 90.00%: 54271.00 us - 99.00%: 84991.00 us - 99.90%: 90111.00 us - 99.99%: 93183.00 us - max: 94207.00 us
</code></pre>
<p>We would like to lower <code>transfer time</code> to sub-millisecond values; let&apos;s try
by running a load test with ~30% of the max perceived sent rate, by setting:</p>
<pre><code class="lang-bash">--rate 30000 --hdr /tmp/30K.hdr
</code></pre>
<p>The whole command is then:</p>
<pre><code class="lang-bash">$ ./artemis perf client --rate 30000 --hdr /tmp/30K.hdr --warmup 20 --max-pending 100 --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 queue://TEST_QUEUE
<span class="hljs-comment"># ... after 20 warmup seconds ...</span>
--- warmup <span class="hljs-literal">false</span>
--- sent: 30302 msg/sec
--- blocked: 0 msg/sec
--- completed: 30302 msg/sec
--- received: 30303 msg/sec
--- send delay time: mean: 24.20 us - 50.00%: 21.00 us - 90.00%: 54.00 us - 99.00%: 72.00 us - 99.90%: 233.00 us - 99.99%: 659.00 us - max: 731.00 us
--- send ack time: mean: 150.48 us - 50.00%: 120.00 us - 90.00%: 172.00 us - 99.00%: 1223.00 us - 99.90%: 2543.00 us - 99.99%: 3183.00 us - max: 3247.00 us
--- transfer time: mean: 171.53 us - 50.00%: 135.00 us - 90.00%: 194.00 us - 99.00%: 1407.00 us - 99.90%: 2607.00 us - 99.99%: 3151.00 us - max: 3183.00 us
<span class="hljs-comment"># CTRL + C</span>
--- SUMMARY
--- result: success
--- total sent: 1216053
--- total blocked: 845
--- total completed: 1216053
--- total received: 1216053
--- aggregated delay send time: mean: 35.84 us - 50.00%: 20.00 us - 90.00%: 55.00 us - 99.00%: 116.00 us - 99.90%: 3359.00 us - 99.99%: 5503.00 us - max: 6495.00 us
--- aggregated send time: mean: 147.38 us - 50.00%: 117.00 us - 90.00%: 165.00 us - 99.00%: 991.00 us - 99.90%: 4191.00 us - 99.99%: 5695.00 us - max: 7103.00 us
--- aggregated transfer time: mean: 178.48 us - 50.00%: 134.00 us - 90.00%: 188.00 us - 99.00%: 1359.00 us - 99.90%: 5471.00 us - 99.99%: 8831.00 us - max: 12799.00 us
</code></pre>
<p>We&apos;ve now achieved sub-millisecond <code>transfer</code> latencies up to the <code>90.00 percentile</code>.<br>Opening <code>/tmp/30K.hdr</code> makes it easier to see:</p>
<p><img src="images/30K.png" alt="test"></p>
<p>Now <code>send</code> and <code>transfer</code> time looks quite similar and there&apos;s no sign of queueing, but...</p>
<h3 id="what-delay-send-time-means">What <code>delay send time</code> means?</h3>
<p>This metric is borrowed from the <a href="http://highscalability.com/blog/2015/10/5/your-load-generator-is-probably-lying-to-you-take-the-red-pi.html" target="_blank">Coordinated Omission</a> concept,
and it measures the delay of producer(s) while trying to send messages at the requested rate.</p>
<p>The source of such delay could be:</p>
<ul>
<li>slow responding broker: the load generator reached <code>--max-pending</code> and the expected rate cannot be honored</li>
<li>client running out of resources (lack of CPU time, GC pauses, etc etc): load generator cannot keep-up with the expected rate because it is just &quot;too fast&quot; for it</li>
<li>protocol-dependent blocking behaviours: CORE JMS 2 async send can block due to <code>producerWindowSize</code> exhaustion</li>
</ul>
<p>A sane run of a target rate test should keep <code>delay send time</code> under control, or investigative actions must be taken
to understand the source of the delay.<br>Let&apos;s show it with an example: we&apos;ve already checked the all-out rate of the broker, i.e. ~90K msg/sec</p>
<p>By running a <code>--rate 90000</code> test under the same conditions, latencies will look like this:</p>
<p><img src="images/90K.png" alt="test"></p>
<p>It clearly shows that the load generator is getting delayed and cannot keep-up with the expected rate.</p>
<p>Below is a more complex example involving destinations (auto)generation with &quot;asymmetric&quot; load i.e: the producer number is different from consumer number.</p>
<h2 id="case-3-target-rate-load-on-10-durable-topics-each-with-3-producers-and-2-unshared-consumers">Case 3: Target Rate load on 10 durable topics, each with 3 producers and 2 unshared consumers</h2>
<p>The <code>perf</code> tool can auto generate destinations using</p>
<pre><code class="lang-bash">--num-destinations &lt;number of destinations to generate&gt;
</code></pre>
<p>and naming them by using the destination name specified as the seed and an ordered sequence suffix.</p>
<p>eg</p>
<pre><code class="lang-bash">--num-destinations 3 topic://TOPIC
</code></pre>
<p>would generate 3 topics: <code>TOPIC0</code>, <code>TOPIC1</code>, <code>TOPIC2</code>.</p>
<p>With the default configuration (without specifying <code>--num-destinations</code>) it would just create <code>TOPIC</code>, without any numerical suffix.</p>
<p>In order to create a load generation on 10 topics, <strong>each</strong> with 3 producers and 2 unshared consumers: </p>
<pre><code class="lang-bash">--producers 3 --consumers 2 --num-destinations 10 topic://TOPIC
</code></pre>
<p>The whole <code>perf client</code> all-out throughput command would be:</p>
<pre><code class="lang-bash"><span class="hljs-comment"># same as in the previous cases</span>
./artemis perf client --warmup 20 --max-pending 100 -<span class="hljs-_">-s</span>
how-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 \
--producers 3 --consumers 2 --num-destinations 10 --durable --persistent topic://DURABLE_TOPIC
<span class="hljs-comment"># this last part above is new</span>
</code></pre>
<p>and it would print...</p>
<pre><code class="lang-bash">javax.jms.IllegalStateException: Cannot create durable subscription - client ID has not been <span class="hljs-built_in">set</span>
</code></pre>
<p>Given that the generator is creating <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/session#createDurableConsumer-javax.jms.Topic-java.lang.String-" target="_blank">unshared durable Topic subscriptions</a>, it is
mandatory to set a ClientID for each connection used.</p>
<p>The <code>perf client</code> tool creates a connection for each consumer by default and auto-generates both ClientIDs
and subscription names (as required by the <a href="https://jakarta.ee/specifications/messaging/2.0/apidocs/javax/jms/session#createDurableConsumer-javax.jms.Topic-java.lang.String-" target="_blank">unshared durable Topic subscriptions API</a>).
The tool still requires users to specify a Client ID prefix with <code>--clientID &lt;Client ID prefix&gt;</code> and takes care of unsubscribing the consumers on test completion.</p>
<p>The complete commands now looks like:</p>
<pre><code class="lang-bash">./artemis perf client --warmup 20 --max-pending 100 --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 \
--producers 3 --consumers 2 --num-destinations 10 --durable --persistent topic://DURABLE_TOPIC --clientID <span class="hljs-built_in">test</span>_id
<span class="hljs-comment"># after few seconds</span>
--- warmup <span class="hljs-literal">false</span>
--- sent: 74842 msg/sec
--- blocked: 2702 msg/sec
--- completed: 74641 msg/sec
--- received: 146412 msg/sec
--- send ack time: mean: 37366.13 us - 50.00%: 37119.00 us - 90.00%: 46079.00 us - 99.00%: 68095.00 us - 99.90%: 84479.00 us - 99.99%: 94719.00 us - max: 95743.00 us
--- transfer time: mean: 44060.66 us - 50.00%: 43263.00 us - 90.00%: 54527.00 us - 99.00%: 75775.00 us - 99.90%: 87551.00 us - 99.99%: 91135.00 us - max: 91135.00 us
<span class="hljs-comment"># CTRL + C</span>
--- SUMMARY
--- result: success
--- total sent: 2377653
--- total blocked: 80004
--- total completed: 2377653
--- total received: 4755306
--- aggregated send time: mean: 39423.69 us - 50.00%: 38911.00 us - 90.00%: 49663.00 us - 99.00%: 66047.00 us - 99.90%: 85503.00 us - 99.99%: 101887.00 us - max: 115711.00 us
--- aggregated transfer time: mean: 46216.99 us - 50.00%: 45311.00 us - 90.00%: 57855.00 us - 99.00%: 78335.00 us - 99.90%: 97791.00 us - 99.99%: 113151.00 us - max: 125439.00 us
</code></pre>
<p>Results show that <code>transfer time</code> isn&apos;t queuing up, meaning that subscribers are capable of keeping up with the producers: hence a reasonable
rate to test could be ~80% of the perceived <code>sent</code> rate, i.e. <code>--rate 60000</code>:</p>
<pre><code class="lang-bash">./artemis perf client --warmup 20 --max-pending 100 --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 \
--producers 3 --consumers 2 --num-destinations 10 --durable --persistent topic://DURABLE_TOPIC --clientID <span class="hljs-built_in">test</span>_id \
--rate 60000
<span class="hljs-comment"># after many seconds while running</span>
--- warmup <span class="hljs-literal">false</span>
--- sent: 55211 msg/sec
--- blocked: 2134 msg/sec
--- completed: 54444 msg/sec
--- received: 111622 msg/sec
--- send delay time: mean: 6306710.04 us - 50.00%: 6094847.00 us - 90.00%: 7766015.00 us - 99.00%: 8224767.00 us - 99.90%: 8257535.00 us - 99.99%: 8257535.00 us - max: 8257535.00 us
--- send ack time: mean: 50072.92 us - 50.00%: 50431.00 us - 90.00%: 57855.00 us - 99.00%: 65023.00 us - 99.90%: 71167.00 us - 99.99%: 71679.00 us - max: 71679.00 us
--- transfer time: mean: 63672.92 us - 50.00%: 65535.00 us - 90.00%: 78847.00 us - 99.00%: 86015.00 us - 99.90%: 90623.00 us - 99.99%: 93183.00 us - max: 94719.00 us
<span class="hljs-comment"># it won&apos;t get any better :(</span>
</code></pre>
<p>What&apos;s wrong with the <code>send delay time</code>?<br>Results show that the load generator cannot keep up with the expected rate and it&apos;s accumulating a huge delay
on the expected scheduled load: let&apos;s try fixing it by adding more producer
threads, adding </p>
<pre><code class="lang-bash">--threads &lt;producer threads&gt;
</code></pre>
<p>By using two producer threads, the command now looks like:</p>
<pre><code class="lang-bash">./artemis perf client --warmup 20 --max-pending 100 --show-latency --url tcp://localhost:61616?confirmationWindowSize=20000 --consumer-url tcp://localhost:61616 \
--producers 3 --consumers 2 --num-destinations 10 --durable --persistent topic://DURABLE_TOPIC --clientID <span class="hljs-built_in">test</span>_id \
--rate 60000 --threads 2
<span class="hljs-comment"># after few seconds warming up....</span>
--- warmup <span class="hljs-literal">false</span>
--- sent: 59894 msg/sec
--- blocked: 694 msg/sec
--- completed: 58925 msg/sec
--- received: 114857 msg/sec
--- send delay time: mean: 3189.96 us - 50.00%: 277.00 us - 90.00%: 10623.00 us - 99.00%: 35583.00 us - 99.90%: 47871.00 us - 99.99%: 56063.00 us - max: 58367.00 us
--- send ack time: mean: 31500.93 us - 50.00%: 31231.00 us - 90.00%: 48383.00 us - 99.00%: 65535.00 us - 99.90%: 83455.00 us - 99.99%: 95743.00 us - max: 98303.00 us
--- transfer time: mean: 38151.21 us - 50.00%: 37119.00 us - 90.00%: 55807.00 us - 99.00%: 84479.00 us - 99.90%: 104959.00 us - 99.99%: 118271.00 us - max: 121855.00 us
</code></pre>
<p><code>send delay time</code> now seems under control, meaning that the load generator needed some tuning in order to work at its best.</p>
</section>
</div>
<div class="search-results">
<div class="has-results">
<h1 class="search-results-title"><span class='search-results-count'></span> results matching "<span class='search-query'></span>"</h1>
<ul class="search-results-list"></ul>
</div>
<div class="no-results">
<h1 class="search-results-title">No results matching "<span class='search-query'></span>"</h1>
</div>
</div>
</div>
</div>
</div>
</div>
<a href="perf-tuning.html" class="navigation navigation-prev " aria-label="Previous page: Troubleshooting and Performance Tuning">
<i class="fa fa-angle-left"></i>
</a>
<a href="configuration-index.html" class="navigation navigation-next " aria-label="Next page: Configuration Reference">
<i class="fa fa-angle-right"></i>
</a>
</div>
<script>
var gitbook = gitbook || [];
gitbook.push(function() {
gitbook.page.hasChanged({"page":{"title":"Performance Tools","level":"1.81","depth":1,"next":{"title":"Configuration Reference","level":"1.82","depth":1,"path":"configuration-index.md","ref":"configuration-index.md","articles":[]},"previous":{"title":"Troubleshooting and Performance Tuning","level":"1.80","depth":1,"path":"perf-tuning.md","ref":"perf-tuning.md","articles":[]},"dir":"ltr"},"config":{"plugins":[],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"highlight":{},"search":{},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"fontsettings":{"theme":"white","family":"sans","size":2},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"github":"apache/activemq-artemis","theme":"default","githubHost":"https://github.com/","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"version":"2.24.0","title":"ActiveMQ Artemis Documentation","links":{"home":"http://activemq.apache.org/artemis","issues":"https://issues.apache.org/jira/browse/ARTEMIS","contribute":"http://activemq.apache.org/contributing.html"},"gitbook":"3.x.x","description":"ActiveMQ Artemis User Guide and Reference 
Documentation"},"file":{"path":"perf-tools.md","mtime":"2022-08-08T16:17:30.749Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2022-08-08T16:18:17.607Z"},"basePath":".","book":{"language":""}});
});
</script>
</div>
<script src="gitbook/gitbook.js"></script>
<script src="gitbook/theme.js"></script>
<script src="gitbook/gitbook-plugin-search/search-engine.js"></script>
<script src="gitbook/gitbook-plugin-search/search.js"></script>
<script src="gitbook/gitbook-plugin-lunr/lunr.min.js"></script>
<script src="gitbook/gitbook-plugin-lunr/search-lunr.js"></script>
<script src="gitbook/gitbook-plugin-sharing/buttons.js"></script>
<script src="gitbook/gitbook-plugin-fontsettings/fontsettings.js"></script>
</body>
</html>